-rw-r--r--Documentation/ABI/testing/sysfs-platform-at9125
-rw-r--r--Documentation/DocBook/device-drivers.tmpl4
-rw-r--r--Documentation/DocBook/dvb/dvbapi.xml2
-rw-r--r--Documentation/DocBook/media.tmpl4
-rw-r--r--Documentation/DocBook/v4l/dev-rds.xml6
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml3
-rw-r--r--Documentation/feature-removal-schedule.txt15
-rw-r--r--Documentation/lguest/lguest.c73
-rw-r--r--Documentation/lguest/lguest.txt5
-rw-r--r--Documentation/networking/batman-adv.txt16
-rw-r--r--Documentation/networking/bonding.txt83
-rw-r--r--Documentation/sound/alsa/soc/codec.txt45
-rw-r--r--Documentation/sound/alsa/soc/machine.txt38
-rw-r--r--Documentation/sound/alsa/soc/platform.txt12
-rw-r--r--Documentation/video4linux/v4l2-controls.txt12
-rw-r--r--MAINTAINERS82
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Kconfig19
-rw-r--r--arch/arm/configs/ag5evm_defconfig2
-rw-r--r--arch/arm/configs/am200epdkit_defconfig2
-rw-r--r--arch/arm/configs/at572d940hfek_defconfig2
-rw-r--r--arch/arm/configs/badge4_defconfig2
-rw-r--r--arch/arm/configs/bcmring_defconfig2
-rw-r--r--arch/arm/configs/cm_x2xx_defconfig2
-rw-r--r--arch/arm/configs/colibri_pxa270_defconfig2
-rw-r--r--arch/arm/configs/collie_defconfig2
-rw-r--r--arch/arm/configs/corgi_defconfig2
-rw-r--r--arch/arm/configs/da8xx_omapl_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/dove_defconfig2
-rw-r--r--arch/arm/configs/ebsa110_defconfig2
-rw-r--r--arch/arm/configs/edb7211_defconfig2
-rw-r--r--arch/arm/configs/em_x270_defconfig2
-rw-r--r--arch/arm/configs/ep93xx_defconfig2
-rw-r--r--arch/arm/configs/eseries_pxa_defconfig2
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/footbridge_defconfig2
-rw-r--r--arch/arm/configs/fortunet_defconfig2
-rw-r--r--arch/arm/configs/h5000_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/ixp2000_defconfig2
-rw-r--r--arch/arm/configs/ixp23xx_defconfig2
-rw-r--r--arch/arm/configs/ixp4xx_defconfig2
-rw-r--r--arch/arm/configs/loki_defconfig2
-rw-r--r--arch/arm/configs/lpd7a400_defconfig2
-rw-r--r--arch/arm/configs/lpd7a404_defconfig2
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/mv78xx0_defconfig2
-rw-r--r--arch/arm/configs/mx1_defconfig2
-rw-r--r--arch/arm/configs/mx21_defconfig2
-rw-r--r--arch/arm/configs/mx27_defconfig2
-rw-r--r--arch/arm/configs/mx3_defconfig2
-rw-r--r--arch/arm/configs/mx51_defconfig2
-rw-r--r--arch/arm/configs/nhk8815_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig2
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/orion5x_defconfig2
-rw-r--r--arch/arm/configs/pcm027_defconfig2
-rw-r--r--arch/arm/configs/pcontrol_g20_defconfig2
-rw-r--r--arch/arm/configs/pleb_defconfig2
-rw-r--r--arch/arm/configs/pnx4008_defconfig2
-rw-r--r--arch/arm/configs/simpad_defconfig2
-rw-r--r--arch/arm/configs/spitz_defconfig2
-rw-r--r--arch/arm/configs/stmp378x_defconfig2
-rw-r--r--arch/arm/configs/stmp37xx_defconfig2
-rw-r--r--arch/arm/configs/tct_hammer_defconfig2
-rw-r--r--arch/arm/configs/trizeps4_defconfig2
-rw-r--r--arch/arm/configs/u300_defconfig2
-rw-r--r--arch/arm/configs/viper_defconfig2
-rw-r--r--arch/arm/configs/xcep_defconfig2
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c4
-rw-r--r--arch/arm/mach-omap1/Kconfig2
-rw-r--r--arch/arm/mach-omap1/Makefile3
-rw-r--r--arch/arm/mach-omap1/time.c101
-rw-r--r--arch/arm/mach-omap1/timer32k.c13
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c29
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c3
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c1
-rw-r--r--arch/arm/mach-omap2/clockdomain.c30
-rw-r--r--arch/arm/mach-omap2/clockdomains44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain2xxx_3xxx.c1
-rw-r--r--arch/arm/mach-omap2/timer-gp.c10
-rw-r--r--arch/arm/plat-omap/Kconfig8
-rw-r--r--arch/arm/plat-omap/counter_32k.c22
-rw-r--r--arch/arm/plat-omap/dma.c7
-rw-r--r--arch/arm/plat-omap/include/plat/common.h3
-rw-r--r--arch/avr32/Kconfig4
-rw-r--r--arch/blackfin/Kconfig17
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig2
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-AD7160-EVAL_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-EZKIT-V2_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-TLL6527M_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-ACVILON_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT-SMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig2
-rw-r--r--arch/blackfin/configs/DNP5370_defconfig2
-rw-r--r--arch/blackfin/configs/H8606_defconfig2
-rw-r--r--arch/blackfin/configs/IP0X_defconfig2
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig2
-rw-r--r--arch/blackfin/configs/SRV1_defconfig2
-rw-r--r--arch/blackfin/configs/TCM-BF518_defconfig2
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig2
-rw-r--r--arch/cris/Kconfig6
-rw-r--r--arch/cris/arch-v10/kernel/irq.c41
-rw-r--r--arch/cris/arch-v32/kernel/irq.c50
-rw-r--r--arch/cris/configs/artpec_3_defconfig2
-rw-r--r--arch/cris/configs/etrax-100lx_v2_defconfig2
-rw-r--r--arch/cris/configs/etraxfs_defconfig2
-rw-r--r--arch/cris/kernel/irq.c6
-rw-r--r--arch/frv/Kconfig9
-rw-r--r--arch/frv/defconfig2
-rw-r--r--arch/h8300/Kconfig6
-rw-r--r--arch/h8300/defconfig2
-rw-r--r--arch/h8300/kernel/irq.c50
-rw-r--r--arch/ia64/Kconfig26
-rw-r--r--arch/m32r/Kconfig11
-rw-r--r--arch/m32r/configs/m32700ut.smp_defconfig2
-rw-r--r--arch/m32r/configs/m32700ut.up_defconfig2
-rw-r--r--arch/m32r/configs/mappi.nommu_defconfig2
-rw-r--r--arch/m32r/configs/mappi.smp_defconfig2
-rw-r--r--arch/m32r/configs/mappi.up_defconfig2
-rw-r--r--arch/m32r/configs/mappi2.opsp_defconfig2
-rw-r--r--arch/m32r/configs/mappi2.vdec2_defconfig2
-rw-r--r--arch/m32r/configs/mappi3.smp_defconfig2
-rw-r--r--arch/m32r/configs/oaks32r_defconfig2
-rw-r--r--arch/m32r/configs/opsput_defconfig2
-rw-r--r--arch/m32r/configs/usrv_defconfig2
-rw-r--r--arch/m32r/kernel/irq.c10
-rw-r--r--arch/m32r/platforms/m32104ut/setup.c58
-rw-r--r--arch/m32r/platforms/m32700ut/setup.c214
-rw-r--r--arch/m32r/platforms/mappi/setup.c78
-rw-r--r--arch/m32r/platforms/mappi2/setup.c89
-rw-r--r--arch/m32r/platforms/mappi3/setup.c92
-rw-r--r--arch/m32r/platforms/oaks32r/setup.c65
-rw-r--r--arch/m32r/platforms/opsput/setup.c220
-rw-r--r--arch/m32r/platforms/usrv/setup.c115
-rw-r--r--arch/m68knommu/Kconfig9
-rw-r--r--arch/m68knommu/configs/m5208evb_defconfig2
-rw-r--r--arch/m68knommu/configs/m5249evb_defconfig2
-rw-r--r--arch/m68knommu/configs/m5272c3_defconfig2
-rw-r--r--arch/m68knommu/configs/m5275evb_defconfig2
-rw-r--r--arch/m68knommu/configs/m5307c3_defconfig2
-rw-r--r--arch/m68knommu/configs/m5407c3_defconfig2
-rw-r--r--arch/m68knommu/defconfig2
-rw-r--r--arch/microblaze/Kconfig11
-rw-r--r--arch/microblaze/configs/mmu_defconfig2
-rw-r--r--arch/microblaze/configs/nommu_defconfig2
-rw-r--r--arch/mips/Kconfig3
-rw-r--r--arch/mips/Kconfig.debug2
-rw-r--r--arch/mips/configs/ar7_defconfig2
-rw-r--r--arch/mips/configs/bcm47xx_defconfig2
-rw-r--r--arch/mips/configs/bcm63xx_defconfig2
-rw-r--r--arch/mips/configs/bigsur_defconfig2
-rw-r--r--arch/mips/configs/capcella_defconfig2
-rw-r--r--arch/mips/configs/cavium-octeon_defconfig2
-rw-r--r--arch/mips/configs/cobalt_defconfig2
-rw-r--r--arch/mips/configs/db1000_defconfig2
-rw-r--r--arch/mips/configs/db1100_defconfig2
-rw-r--r--arch/mips/configs/db1200_defconfig2
-rw-r--r--arch/mips/configs/db1500_defconfig2
-rw-r--r--arch/mips/configs/db1550_defconfig2
-rw-r--r--arch/mips/configs/decstation_defconfig2
-rw-r--r--arch/mips/configs/e55_defconfig2
-rw-r--r--arch/mips/configs/fuloong2e_defconfig2
-rw-r--r--arch/mips/configs/gpr_defconfig2
-rw-r--r--arch/mips/configs/ip22_defconfig2
-rw-r--r--arch/mips/configs/ip27_defconfig2
-rw-r--r--arch/mips/configs/ip28_defconfig2
-rw-r--r--arch/mips/configs/ip32_defconfig2
-rw-r--r--arch/mips/configs/jazz_defconfig2
-rw-r--r--arch/mips/configs/jmr3927_defconfig2
-rw-r--r--arch/mips/configs/lasat_defconfig2
-rw-r--r--arch/mips/configs/lemote2f_defconfig2
-rw-r--r--arch/mips/configs/malta_defconfig2
-rw-r--r--arch/mips/configs/markeins_defconfig2
-rw-r--r--arch/mips/configs/mipssim_defconfig2
-rw-r--r--arch/mips/configs/mpc30x_defconfig2
-rw-r--r--arch/mips/configs/msp71xx_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig2
-rw-r--r--arch/mips/configs/pb1100_defconfig2
-rw-r--r--arch/mips/configs/pb1200_defconfig2
-rw-r--r--arch/mips/configs/pb1500_defconfig2
-rw-r--r--arch/mips/configs/pb1550_defconfig2
-rw-r--r--arch/mips/configs/pnx8335-stb225_defconfig2
-rw-r--r--arch/mips/configs/pnx8550-jbs_defconfig2
-rw-r--r--arch/mips/configs/pnx8550-stb810_defconfig2
-rw-r--r--arch/mips/configs/powertv_defconfig2
-rw-r--r--arch/mips/configs/rb532_defconfig2
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig2
-rw-r--r--arch/mips/configs/rm200_defconfig2
-rw-r--r--arch/mips/configs/sb1250-swarm_defconfig2
-rw-r--r--arch/mips/configs/tb0219_defconfig2
-rw-r--r--arch/mips/configs/tb0226_defconfig2
-rw-r--r--arch/mips/configs/tb0287_defconfig2
-rw-r--r--arch/mips/configs/workpad_defconfig2
-rw-r--r--arch/mips/configs/wrppmc_defconfig2
-rw-r--r--arch/mips/configs/yosemite_defconfig2
-rw-r--r--arch/mn10300/Kconfig8
-rw-r--r--arch/mn10300/configs/asb2303_defconfig2
-rw-r--r--arch/mn10300/configs/asb2364_defconfig2
-rw-r--r--arch/parisc/Kconfig18
-rw-r--r--arch/parisc/configs/a500_defconfig2
-rw-r--r--arch/parisc/configs/c3000_defconfig2
-rw-r--r--arch/powerpc/Kconfig28
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/dts/mpc8308rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts4
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig2
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig2
-rw-r--r--arch/powerpc/configs/40x/hcu4_defconfig2
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig2
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig2
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig2
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig2
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig2
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig2
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig2
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig2
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig2
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig2
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig2
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig2
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig2
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig2
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig2
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/asp8347_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/kmeter1_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_mds_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_mds_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_mds_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_rdk_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_mds_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/sbc834x_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/ksi8560_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/sbc8560_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/socrates_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8540_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8541_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8555_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8560_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/gef_ppc9a_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc310_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc610_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/sbc8641d_defconfig2
-rw-r--r--arch/powerpc/configs/adder875_defconfig2
-rw-r--r--arch/powerpc/configs/e55xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig2
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig2
-rw-r--r--arch/powerpc/configs/gamecube_defconfig2
-rw-r--r--arch/powerpc/configs/holly_defconfig2
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig2
-rw-r--r--arch/powerpc/configs/mgsuvd_defconfig2
-rw-r--r--arch/powerpc/configs/mpc7448_hpc2_defconfig2
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig2
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig2
-rw-r--r--arch/powerpc/configs/mpc86xx_defconfig2
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig2
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig2
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig7
-rw-r--r--arch/powerpc/configs/storcenter_defconfig2
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig2
-rw-r--r--arch/powerpc/configs/wii_defconfig2
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h27
-rw-r--r--arch/powerpc/include/asm/immap_qe.h21
-rw-r--r--arch/powerpc/include/asm/irqflags.h40
-rw-r--r--arch/powerpc/include/asm/machdep.h18
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h14
-rw-r--r--arch/powerpc/include/asm/spu.h8
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S6
-rw-r--r--arch/powerpc/kernel/cputable.c23
-rw-r--r--arch/powerpc/kernel/crash.c72
-rw-r--r--arch/powerpc/kernel/entry_32.S11
-rw-r--r--arch/powerpc/kernel/machine_kexec.c19
-rw-r--r--arch/powerpc/kernel/perf_event_fsl_emb.c1
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/rtas_flash.c53
-rw-r--r--arch/powerpc/kernel/rtasd.c2
-rw-r--r--arch/powerpc/kernel/time.c25
-rw-r--r--arch/powerpc/kernel/traps.c12
-rw-r--r--arch/powerpc/lib/feature-fixups-test.S19
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h2
-rw-r--r--arch/powerpc/platforms/83xx/usb.c21
-rw-r--r--arch/powerpc/platforms/cell/cpufreq_spudemand.c20
-rw-r--r--arch/powerpc/platforms/cell/qpace_setup.c5
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c70
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c27
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c11
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c11
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig6
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c10
-rw-r--r--arch/powerpc/platforms/pseries/ras.c102
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c6
-rw-r--r--arch/score/Kconfig10
-rw-r--r--arch/score/configs/spct6600_defconfig2
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sparc/Kconfig9
-rw-r--r--arch/tile/Kconfig55
-rw-r--r--arch/tile/Kconfig.debug2
-rw-r--r--arch/tile/configs/tile_defconfig2
-rw-r--r--arch/um/Kconfig.common6
-rw-r--r--arch/um/Kconfig.um3
-rw-r--r--arch/um/defconfig2
-rw-r--r--arch/x86/Kconfig20
-rw-r--r--arch/x86/Kconfig.cpu2
-rw-r--r--arch/x86/Kconfig.debug4
-rw-r--r--arch/x86/include/asm/cacheflush.h42
-rw-r--r--arch/x86/include/asm/cpu.h1
-rw-r--r--arch/x86/include/asm/jump_label.h2
-rw-r--r--arch/x86/include/asm/numa_32.h2
-rw-r--r--arch/x86/include/asm/numa_64.h1
-rw-r--r--arch/x86/include/asm/percpu.h8
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c1
-rw-r--r--arch/x86/kernel/process.c3
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/vmlinux.lds.S11
-rw-r--r--arch/x86/lguest/Kconfig1
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/mm/numa.c22
-rw-r--r--arch/x86/mm/numa_64.c24
-rw-r--r--arch/x86/mm/srat_32.c1
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/irq.c2
-rw-r--r--arch/x86/xen/p2m.c20
-rw-r--r--arch/xtensa/configs/common_defconfig2
-rw-r--r--arch/xtensa/configs/iss_defconfig2
-rw-r--r--arch/xtensa/configs/s6105_defconfig2
-rw-r--r--block/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h16
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h10
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c64
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c10
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c4
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c8
-rw-r--r--drivers/acpi/acpica/nsalloc.c15
-rw-r--r--drivers/acpi/acpica/nsdump.c17
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c4
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c7
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c4
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psparse.c27
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c9
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/nvs.c7
-rw-r--r--drivers/acpi/osl.c12
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/char/Kconfig12
-rw-r--r--drivers/char/Makefile13
-rw-r--r--drivers/char/tpm/tpm.c10
-rw-r--r--drivers/char/tpm/tpm_tis.c6
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/core-card.c11
-rw-r--r--drivers/firewire/net.c9
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/hid/Kconfig64
-rw-r--r--drivers/hid/usbhid/Kconfig2
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/idle/intel_idle.c8
-rw-r--r--drivers/infiniband/hw/mthca/Kconfig2
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig2
-rw-r--r--drivers/input/Kconfig6
-rw-r--r--drivers/input/keyboard/Kconfig4
-rw-r--r--drivers/input/mouse/Kconfig10
-rw-r--r--drivers/input/serio/Kconfig6
-rw-r--r--drivers/input/touchscreen/Kconfig30
-rw-r--r--drivers/leds/ledtrig-gpio.c15
-rw-r--r--drivers/lguest/page_tables.c2
-rw-r--r--drivers/lguest/x86/core.c4
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/media/common/saa7146_core.c2
-rw-r--r--drivers/media/common/saa7146_fops.c8
-rw-r--r--drivers/media/common/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146_video.c20
-rw-r--r--drivers/media/common/tuners/Kconfig2
-rw-r--r--drivers/media/common/tuners/tda8290.c130
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c6
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c9
-rw-r--r--drivers/media/dvb/frontends/Kconfig2
-rw-r--r--drivers/media/dvb/frontends/af9013.c4
-rw-r--r--drivers/media/dvb/frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb/frontends/mb86a20s.c36
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c2
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c1
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c478
-rw-r--r--drivers/media/radio/radio-maxiradio.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c9
-rw-r--r--drivers/media/rc/ene_ir.c23
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/imon.c60
-rw-r--r--drivers/media/rc/ir-raw.c2
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c52
-rw-r--r--drivers/media/rc/mceusb.c3
-rw-r--r--drivers/media/video/Kconfig11
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/adv7175.c11
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c39
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/cafe_ccic.c11
-rw-r--r--drivers/media/video/cpia2/cpia2.h2
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c65
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c104
-rw-r--r--drivers/media/video/cx18/cx18-driver.c24
-rw-r--r--drivers/media/video/cx18/cx18-driver.h3
-rw-r--r--drivers/media/video/cx18/cx18-streams.h3
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dvb.c5
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c22
-rw-r--r--drivers/media/video/davinci/vpif.c177
-rw-r--r--drivers/media/video/davinci/vpif.h18
-rw-r--r--drivers/media/video/davinci/vpif_capture.c451
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.c474
-rw-r--r--drivers/media/video/davinci/vpif_display.h2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c19
-rw-r--r--drivers/media/video/et61x251/et61x251.h24
-rw-r--r--drivers/media/video/gspca/benq.c2
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/cpia1.c2
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/finepix.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/video/gspca/gspca.c210
-rw-r--r--drivers/media/video/gspca/gspca.h2
-rw-r--r--drivers/media/video/gspca/jeilinj.c2
-rw-r--r--drivers/media/video/gspca/jpeg.h4
-rw-r--r--drivers/media/video/gspca/konica.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c2
-rw-r--r--drivers/media/video/gspca/mars.c2
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c8
-rw-r--r--drivers/media/video/gspca/ov534.c29
-rw-r--r--drivers/media/video/gspca/ov534_9.c2
-rw-r--r--drivers/media/video/gspca/pac207.c2
-rw-r--r--drivers/media/video/gspca/pac7302.c4
-rw-r--r--drivers/media/video/gspca/pac7311.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2
-rw-r--r--drivers/media/video/gspca/sonixb.c270
-rw-r--r--drivers/media/video/gspca/sonixj.c155
-rw-r--r--drivers/media/video/gspca/spca1528.c2
-rw-r--r--drivers/media/video/gspca/spca500.c2
-rw-r--r--drivers/media/video/gspca/spca501.c2
-rw-r--r--drivers/media/video/gspca/spca505.c2
-rw-r--r--drivers/media/video/gspca/spca508.c2
-rw-r--r--drivers/media/video/gspca/spca561.c2
-rw-r--r--drivers/media/video/gspca/sq905.c2
-rw-r--r--drivers/media/video/gspca/sq905c.c2
-rw-r--r--drivers/media/video/gspca/sq930x.c2
-rw-r--r--drivers/media/video/gspca/stk014.c2
-rw-r--r--drivers/media/video/gspca/stv0680.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/video/gspca/sunplus.c2
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/gspca/tv8532.c2
-rw-r--r--drivers/media/video/gspca/vc032x.c2
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/video/gspca/zc3xx.c2
-rw-r--r--drivers/media/video/hdpvr/Makefile4
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c10
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c143
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c7
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h5
-rw-r--r--drivers/media/video/ir-kbd-i2c.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c9
-rw-r--r--drivers/media/video/mt9v011.c54
-rw-r--r--drivers/media/video/mt9v011.h36
-rw-r--r--drivers/media/video/ov7670.c74
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c62
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c51
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c80
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h74
-rw-r--r--drivers/media/video/sr030pc30.c10
-rw-r--r--drivers/media/video/tda9875.c411
-rw-r--r--drivers/media/video/tlg2300/pd-video.c13
-rw-r--r--drivers/media/video/v4l2-common.c19
-rw-r--r--drivers/media/video/v4l2-ctrls.c34
-rw-r--r--drivers/media/video/v4l2-dev.c9
-rw-r--r--drivers/media/video/v4l2-device.c16
-rw-r--r--drivers/media/video/v4l2-ioctl.c20
-rw-r--r--drivers/media/video/w9966.c1
-rw-r--r--drivers/media/video/zoran/zoran_card.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/ks8695net.c2
-rw-r--r--drivers/net/atl1c/atl1c_hw.c15
-rw-r--r--drivers/net/atl1c/atl1c_hw.h43
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c12
-rw-r--r--drivers/net/atl1e/atl1e_hw.c34
-rw-r--r--drivers/net/atl1e/atl1e_hw.h111
-rw-r--r--drivers/net/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/bna/bnad.c108
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bnx2.c29
-rw-r--r--drivers/net/bnx2.h5
-rw-r--r--drivers/net/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h118
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c2727
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h34
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c133
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h5
-rw-r--r--drivers/net/bonding/bond_3ad.c4
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c138
-rw-r--r--drivers/net/can/softing/Kconfig30
-rw-r--r--drivers/net/can/softing/Makefile6
-rw-r--r--drivers/net/can/softing/softing.h167
-rw-r--r--drivers/net/can/softing/softing_cs.c359
-rw-r--r--drivers/net/can/softing/softing_fw.c691
-rw-r--r--drivers/net/can/softing/softing_main.c893
-rw-r--r--drivers/net/can/softing/softing_platform.h40
-rw-r--r--drivers/net/cnic.c182
-rw-r--r--drivers/net/cnic.h2
-rw-r--r--drivers/net/cnic_if.h8
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c52
-rw-r--r--drivers/net/e1000e/ich8lan.c3
-rw-r--r--drivers/net/e1000e/lib.c4
-rw-r--r--drivers/net/e1000e/netdev.c117
-rw-r--r--drivers/net/e1000e/phy.c8
-rw-r--r--drivers/net/enic/enic.h6
-rw-r--r--drivers/net/enic/enic_main.c10
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/igb/e1000_82575.c1
-rw-r--r--drivers/net/igb/e1000_hw.h1
-rw-r--r--drivers/net/igb/igb_main.c1
-rw-r--r--drivers/net/irda/sh_irda.c14
-rw-r--r--drivers/net/macvtap.c18
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c2
-rw-r--r--drivers/net/ppp_generic.c148
-rw-r--r--drivers/net/sfc/ethtool.c4
-rw-r--r--drivers/net/sfc/net_driver.h2
-rw-r--r--drivers/net/smc91x.c13
-rw-r--r--drivers/net/sungem.c58
-rw-r--r--drivers/net/sungem.h1
-rw-r--r--drivers/net/tg3.c258
-rw-r--r--drivers/net/tg3.h16
-rw-r--r--drivers/net/tlan.c3773
-rw-r--r--drivers/net/tlan.h192
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/typhoon.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c19
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/veth.c12
-rw-r--r--drivers/net/via-velocity.c9
-rw-r--r--drivers/net/via-velocity.h8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c93
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c274
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h7
-rw-r--r--drivers/net/vxge/vxge-config.c32
-rw-r--r--drivers/net/vxge/vxge-config.h10
-rw-r--r--drivers/net/vxge/vxge-main.c216
-rw-r--r--drivers/net/vxge/vxge-main.h23
-rw-r--r--drivers/net/vxge/vxge-traffic.c116
-rw-r--r--drivers/net/vxge/vxge-traffic.h14
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/xen-netfront.c96
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/rapidio/rio-scan.c2
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/interface.c61
-rw-r--r--drivers/s390/net/qeth_l2_main.c18
-rw-r--r--drivers/s390/net/qeth_l3_main.c22
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/staging/lirc/TODO.lirc_zilog36
-rw-r--r--drivers/staging/lirc/lirc_imon.c1
-rw-r--r--drivers/staging/lirc/lirc_it87.c1
-rw-r--r--drivers/staging/lirc/lirc_parallel.c19
-rw-r--r--drivers/staging/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/lirc/lirc_serial.c3
-rw-r--r--drivers/staging/lirc/lirc_sir.c1
-rw-r--r--drivers/staging/lirc/lirc_zilog.c650
-rw-r--r--drivers/staging/tm6000/tm6000-video.c46
-rw-r--r--drivers/tty/Makefile2
-rw-r--r--drivers/tty/hvc/Makefile13
-rw-r--r--drivers/tty/hvc/hvc_beat.c (renamed from drivers/char/hvc_beat.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.c (renamed from drivers/char/hvc_console.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.h (renamed from drivers/char/hvc_console.h)0
-rw-r--r--drivers/tty/hvc/hvc_dcc.c (renamed from drivers/char/hvc_dcc.c)0
-rw-r--r--drivers/tty/hvc/hvc_irq.c (renamed from drivers/char/hvc_irq.c)0
-rw-r--r--drivers/tty/hvc/hvc_iseries.c (renamed from drivers/char/hvc_iseries.c)0
-rw-r--r--drivers/tty/hvc/hvc_iucv.c (renamed from drivers/char/hvc_iucv.c)0
-rw-r--r--drivers/tty/hvc/hvc_rtas.c (renamed from drivers/char/hvc_rtas.c)0
-rw-r--r--drivers/tty/hvc/hvc_tile.c (renamed from drivers/char/hvc_tile.c)0
-rw-r--r--drivers/tty/hvc/hvc_udbg.c (renamed from drivers/char/hvc_udbg.c)0
-rw-r--r--drivers/tty/hvc/hvc_vio.c (renamed from drivers/char/hvc_vio.c)0
-rw-r--r--drivers/tty/hvc/hvc_xen.c (renamed from drivers/char/hvc_xen.c)0
-rw-r--r--drivers/tty/hvc/hvcs.c (renamed from drivers/char/hvcs.c)0
-rw-r--r--drivers/tty/hvc/hvsi.c (renamed from drivers/char/hvsi.c)0
-rw-r--r--drivers/tty/hvc/virtio_console.c (renamed from drivers/char/virtio_console.c)0
-rw-r--r--drivers/tty/serial/21285.c (renamed from drivers/serial/21285.c)0
-rw-r--r--drivers/tty/serial/68328serial.c (renamed from drivers/serial/68328serial.c)0
-rw-r--r--drivers/tty/serial/68328serial.h (renamed from drivers/serial/68328serial.h)0
-rw-r--r--drivers/tty/serial/68360serial.c (renamed from drivers/serial/68360serial.c)0
-rw-r--r--drivers/tty/serial/8250.c (renamed from drivers/serial/8250.c)0
-rw-r--r--drivers/tty/serial/8250.h (renamed from drivers/serial/8250.h)0
-rw-r--r--drivers/tty/serial/8250_accent.c (renamed from drivers/serial/8250_accent.c)0
-rw-r--r--drivers/tty/serial/8250_acorn.c (renamed from drivers/serial/8250_acorn.c)0
-rw-r--r--drivers/tty/serial/8250_boca.c (renamed from drivers/serial/8250_boca.c)0
-rw-r--r--drivers/tty/serial/8250_early.c (renamed from drivers/serial/8250_early.c)0
-rw-r--r--drivers/tty/serial/8250_exar_st16c554.c (renamed from drivers/serial/8250_exar_st16c554.c)0
-rw-r--r--drivers/tty/serial/8250_fourport.c (renamed from drivers/serial/8250_fourport.c)0
-rw-r--r--drivers/tty/serial/8250_gsc.c (renamed from drivers/serial/8250_gsc.c)0
-rw-r--r--drivers/tty/serial/8250_hp300.c (renamed from drivers/serial/8250_hp300.c)0
-rw-r--r--drivers/tty/serial/8250_hub6.c (renamed from drivers/serial/8250_hub6.c)0
-rw-r--r--drivers/tty/serial/8250_mca.c (renamed from drivers/serial/8250_mca.c)0
-rw-r--r--drivers/tty/serial/8250_pci.c (renamed from drivers/serial/8250_pci.c)0
-rw-r--r--drivers/tty/serial/8250_pnp.c (renamed from drivers/serial/8250_pnp.c)0
-rw-r--r--drivers/tty/serial/Kconfig (renamed from drivers/serial/Kconfig)4
-rw-r--r--drivers/tty/serial/Makefile (renamed from drivers/serial/Makefile)0
-rw-r--r--drivers/tty/serial/altera_jtaguart.c (renamed from drivers/serial/altera_jtaguart.c)0
-rw-r--r--drivers/tty/serial/altera_uart.c (renamed from drivers/serial/altera_uart.c)0
-rw-r--r--drivers/tty/serial/amba-pl010.c (renamed from drivers/serial/amba-pl010.c)0
-rw-r--r--drivers/tty/serial/amba-pl011.c (renamed from drivers/serial/amba-pl011.c)0
-rw-r--r--drivers/tty/serial/apbuart.c (renamed from drivers/serial/apbuart.c)0
-rw-r--r--drivers/tty/serial/apbuart.h (renamed from drivers/serial/apbuart.h)0
-rw-r--r--drivers/tty/serial/atmel_serial.c (renamed from drivers/serial/atmel_serial.c)0
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c (renamed from drivers/serial/bcm63xx_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_5xx.c (renamed from drivers/serial/bfin_5xx.c)0
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c (renamed from drivers/serial/bfin_sport_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h (renamed from drivers/serial/bfin_sport_uart.h)0
-rw-r--r--drivers/tty/serial/clps711x.c (renamed from drivers/serial/clps711x.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile (renamed from drivers/serial/cpm_uart/Makefile)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h (renamed from drivers/serial/cpm_uart/cpm_uart.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c (renamed from drivers/serial/cpm_uart/cpm_uart_core.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.h)0
-rw-r--r--drivers/tty/serial/crisv10.c (renamed from drivers/serial/crisv10.c)0
-rw-r--r--drivers/tty/serial/crisv10.h (renamed from drivers/serial/crisv10.h)0
-rw-r--r--drivers/tty/serial/dz.c (renamed from drivers/serial/dz.c)0
-rw-r--r--drivers/tty/serial/dz.h (renamed from drivers/serial/dz.h)0
-rw-r--r--drivers/tty/serial/icom.c (renamed from drivers/serial/icom.c)0
-rw-r--r--drivers/tty/serial/icom.h (renamed from drivers/serial/icom.h)0
-rw-r--r--drivers/tty/serial/ifx6x60.c (renamed from drivers/serial/ifx6x60.c)0
-rw-r--r--drivers/tty/serial/ifx6x60.h (renamed from drivers/serial/ifx6x60.h)0
-rw-r--r--drivers/tty/serial/imx.c (renamed from drivers/serial/imx.c)0
-rw-r--r--drivers/tty/serial/ioc3_serial.c (renamed from drivers/serial/ioc3_serial.c)0
-rw-r--r--drivers/tty/serial/ioc4_serial.c (renamed from drivers/serial/ioc4_serial.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.c (renamed from drivers/serial/ip22zilog.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.h (renamed from drivers/serial/ip22zilog.h)0
-rw-r--r--drivers/tty/serial/jsm/Makefile (renamed from drivers/serial/jsm/Makefile)0
-rw-r--r--drivers/tty/serial/jsm/jsm.h (renamed from drivers/serial/jsm/jsm.h)0
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c (renamed from drivers/serial/jsm/jsm_driver.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c (renamed from drivers/serial/jsm/jsm_neo.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c (renamed from drivers/serial/jsm/jsm_tty.c)0
-rw-r--r--drivers/tty/serial/kgdboc.c (renamed from drivers/serial/kgdboc.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.c (renamed from drivers/serial/m32r_sio.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.h (renamed from drivers/serial/m32r_sio.h)0
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h (renamed from drivers/serial/m32r_sio_reg.h)0
-rw-r--r--drivers/tty/serial/max3100.c (renamed from drivers/serial/max3100.c)0
-rw-r--r--drivers/tty/serial/max3107-aava.c (renamed from drivers/serial/max3107-aava.c)0
-rw-r--r--drivers/tty/serial/max3107.c (renamed from drivers/serial/max3107.c)0
-rw-r--r--drivers/tty/serial/max3107.h (renamed from drivers/serial/max3107.h)0
-rw-r--r--drivers/tty/serial/mcf.c (renamed from drivers/serial/mcf.c)0
-rw-r--r--drivers/tty/serial/mfd.c (renamed from drivers/serial/mfd.c)0
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c (renamed from drivers/serial/mpc52xx_uart.c)0
-rw-r--r--drivers/tty/serial/mpsc.c (renamed from drivers/serial/mpsc.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.c (renamed from drivers/serial/mrst_max3110.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.h (renamed from drivers/serial/mrst_max3110.h)0
-rw-r--r--drivers/tty/serial/msm_serial.c (renamed from drivers/serial/msm_serial.c)0
-rw-r--r--drivers/tty/serial/msm_serial.h (renamed from drivers/serial/msm_serial.h)0
-rw-r--r--drivers/tty/serial/mux.c (renamed from drivers/serial/mux.c)0
-rw-r--r--drivers/tty/serial/netx-serial.c (renamed from drivers/serial/netx-serial.c)0
-rw-r--r--drivers/tty/serial/nwpserial.c (renamed from drivers/serial/nwpserial.c)0
-rw-r--r--drivers/tty/serial/of_serial.c (renamed from drivers/serial/of_serial.c)0
-rw-r--r--drivers/tty/serial/omap-serial.c (renamed from drivers/serial/omap-serial.c)0
-rw-r--r--drivers/tty/serial/pch_uart.c (renamed from drivers/serial/pch_uart.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.c (renamed from drivers/serial/pmac_zilog.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.h (renamed from drivers/serial/pmac_zilog.h)0
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c (renamed from drivers/serial/pnx8xxx_uart.c)0
-rw-r--r--drivers/tty/serial/pxa.c (renamed from drivers/serial/pxa.c)0
-rw-r--r--drivers/tty/serial/s3c2400.c (renamed from drivers/serial/s3c2400.c)0
-rw-r--r--drivers/tty/serial/s3c2410.c (renamed from drivers/serial/s3c2410.c)0
-rw-r--r--drivers/tty/serial/s3c2412.c (renamed from drivers/serial/s3c2412.c)0
-rw-r--r--drivers/tty/serial/s3c2440.c (renamed from drivers/serial/s3c2440.c)0
-rw-r--r--drivers/tty/serial/s3c24a0.c (renamed from drivers/serial/s3c24a0.c)0
-rw-r--r--drivers/tty/serial/s3c6400.c (renamed from drivers/serial/s3c6400.c)0
-rw-r--r--drivers/tty/serial/s5pv210.c (renamed from drivers/serial/s5pv210.c)0
-rw-r--r--drivers/tty/serial/sa1100.c (renamed from drivers/serial/sa1100.c)0
-rw-r--r--drivers/tty/serial/samsung.c (renamed from drivers/serial/samsung.c)0
-rw-r--r--drivers/tty/serial/samsung.h (renamed from drivers/serial/samsung.h)0
-rw-r--r--drivers/tty/serial/sb1250-duart.c (renamed from drivers/serial/sb1250-duart.c)0
-rw-r--r--drivers/tty/serial/sc26xx.c (renamed from drivers/serial/sc26xx.c)0
-rw-r--r--drivers/tty/serial/serial_core.c (renamed from drivers/serial/serial_core.c)0
-rw-r--r--drivers/tty/serial/serial_cs.c (renamed from drivers/serial/serial_cs.c)0
-rw-r--r--drivers/tty/serial/serial_ks8695.c (renamed from drivers/serial/serial_ks8695.c)0
-rw-r--r--drivers/tty/serial/serial_lh7a40x.c (renamed from drivers/serial/serial_lh7a40x.c)0
-rw-r--r--drivers/tty/serial/serial_txx9.c (renamed from drivers/serial/serial_txx9.c)0
-rw-r--r--drivers/tty/serial/sh-sci.c (renamed from drivers/serial/sh-sci.c)0
-rw-r--r--drivers/tty/serial/sh-sci.h (renamed from drivers/serial/sh-sci.h)0
-rw-r--r--drivers/tty/serial/sn_console.c (renamed from drivers/serial/sn_console.c)0
-rw-r--r--drivers/tty/serial/suncore.c (renamed from drivers/serial/suncore.c)0
-rw-r--r--drivers/tty/serial/suncore.h (renamed from drivers/serial/suncore.h)0
-rw-r--r--drivers/tty/serial/sunhv.c (renamed from drivers/serial/sunhv.c)0
-rw-r--r--drivers/tty/serial/sunsab.c (renamed from drivers/serial/sunsab.c)0
-rw-r--r--drivers/tty/serial/sunsab.h (renamed from drivers/serial/sunsab.h)0
-rw-r--r--drivers/tty/serial/sunsu.c (renamed from drivers/serial/sunsu.c)0
-rw-r--r--drivers/tty/serial/sunzilog.c (renamed from drivers/serial/sunzilog.c)0
-rw-r--r--drivers/tty/serial/sunzilog.h (renamed from drivers/serial/sunzilog.h)0
-rw-r--r--drivers/tty/serial/timbuart.c (renamed from drivers/serial/timbuart.c)0
-rw-r--r--drivers/tty/serial/timbuart.h (renamed from drivers/serial/timbuart.h)0
-rw-r--r--drivers/tty/serial/uartlite.c (renamed from drivers/serial/uartlite.c)0
-rw-r--r--drivers/tty/serial/ucc_uart.c (renamed from drivers/serial/ucc_uart.c)0
-rw-r--r--drivers/tty/serial/vr41xx_siu.c (renamed from drivers/serial/vr41xx_siu.c)0
-rw-r--r--drivers/tty/serial/vt8500_serial.c (renamed from drivers/serial/vt8500_serial.c)0
-rw-r--r--drivers/tty/serial/zs.c (renamed from drivers/serial/zs.c)0
-rw-r--r--drivers/tty/serial/zs.h (renamed from drivers/serial/zs.h)0
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/backlight/88pm860x_bl.c4
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/virtio/virtio_pci.c20
-rw-r--r--drivers/xen/xenfs/xenbus.c31
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/cifs/cifs_debug.c10
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_unicode.c127
-rw-r--r--fs/cifs/cifsacl.c13
-rw-r--r--fs/cifs/cifsfs.c44
-rw-r--r--fs/cifs/cifsfs.h15
-rw-r--r--fs/cifs/cifsglob.h64
-rw-r--r--fs/cifs/cifspdu.h62
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c113
-rw-r--r--fs/cifs/connect.c190
-rw-r--r--fs/cifs/file.c289
-rw-r--r--fs/cifs/inode.c8
-rw-r--r--fs/cifs/misc.c73
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/sess.c15
-rw-r--r--fs/cifs/transport.c434
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/direct-io.c10
-rw-r--r--fs/ext3/super.c25
-rw-r--r--fs/ext4/super.c25
-rw-r--r--fs/gfs2/inode.c72
-rw-r--r--fs/gfs2/inode.h1
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/ocfs2/super.c5
-rw-r--r--fs/pipe.c10
-rw-r--r--fs/proc/Kconfig6
-rw-r--r--fs/quota/dquot.c18
-rw-r--r--fs/quota/quota.c41
-rw-r--r--fs/reiserfs/super.c17
-rw-r--r--fs/sysfs/Kconfig2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acrestyp.h2
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actypes.h2
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h7
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/acpi_io.h16
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/caif/Kbuild2
-rw-r--r--include/linux/cpu_rmap.h73
-rw-r--r--include/linux/dccp.h2
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/interrupt.h33
-rw-r--r--include/linux/ip_vs.h8
-rw-r--r--include/linux/irqdesc.h17
-rw-r--r--include/linux/kernel.h34
-rw-r--r--include/linux/kmemcheck.h2
-rw-r--r--include/linux/lockdep.h11
-rw-r--r--include/linux/memcontrol.h9
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/module.h27
-rw-r--r--include/linux/moduleparam.h6
-rw-r--r--include/linux/mroute.h1
-rw-r--r--include/linux/netdevice.h141
-rw-r--r--include/linux/netfilter.h27
-rw-r--r--include/linux/netfilter/Kbuild6
-rw-r--r--include/linux/netfilter/ipset/Kbuild4
-rw-r--r--include/linux/netfilter/ipset/ip_set.h452
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h1074
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h31
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h21
-rw-r--r--include/linux/netfilter/ipset/ip_set_hash.h26
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h27
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h127
-rw-r--r--include/linux/netfilter/ipset/pfxlen.h35
-rw-r--r--include/linux/netfilter/nf_conntrack_snmp.h9
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h9
-rw-r--r--include/linux/netfilter/x_tables.h3
-rw-r--r--include/linux/netfilter/xt_AUDIT.h30
-rw-r--r--include/linux/netfilter/xt_CT.h12
-rw-r--r--include/linux/netfilter/xt_NFQUEUE.h6
-rw-r--r--include/linux/netfilter/xt_TCPOPTSTRIP.h4
-rw-r--r--include/linux/netfilter/xt_TPROXY.h10
-rw-r--r--include/linux/netfilter/xt_cluster.h10
-rw-r--r--include/linux/netfilter/xt_comment.h2
-rw-r--r--include/linux/netfilter/xt_connlimit.h16
-rw-r--r--include/linux/netfilter/xt_conntrack.h15
-rw-r--r--include/linux/netfilter/xt_devgroup.h21
-rw-r--r--include/linux/netfilter/xt_quota.h8
-rw-r--r--include/linux/netfilter/xt_set.h56
-rw-r--r--include/linux/netfilter/xt_socket.h2
-rw-r--r--include/linux/netfilter/xt_time.h16
-rw-r--r--include/linux/netfilter/xt_u32.h18
-rw-r--r--include/linux/netfilter_bridge/ebt_802_3.h26
-rw-r--r--include/linux/netfilter_bridge/ebt_among.h4
-rw-r--r--include/linux/netfilter_bridge/ebt_arp.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_ip.h14
-rw-r--r--include/linux/netfilter_bridge/ebt_ip6.h25
-rw-r--r--include/linux/netfilter_bridge/ebt_limit.h10
-rw-r--r--include/linux/netfilter_bridge/ebt_log.h8
-rw-r--r--include/linux/netfilter_bridge/ebt_mark_m.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_nflog.h12
-rw-r--r--include/linux/netfilter_bridge/ebt_pkttype.h6
-rw-r--r--include/linux/netfilter_bridge/ebt_stp.h26
-rw-r--r--include/linux/netfilter_bridge/ebt_ulog.h4
-rw-r--r--include/linux/netfilter_bridge/ebt_vlan.h10
-rw-r--r--include/linux/netfilter_ipv4/ipt_CLUSTERIP.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_ECN.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_SAME.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_TTL.h6
-rw-r--r--include/linux/netfilter_ipv4/ipt_addrtype.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_ah.h6
-rw-r--r--include/linux/netfilter_ipv4/ipt_ecn.h10
-rw-r--r--include/linux/netfilter_ipv4/ipt_ttl.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_HL.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_REJECT.h4
-rw-r--r--include/linux/netfilter_ipv6/ip6t_ah.h10
-rw-r--r--include/linux/netfilter_ipv6/ip6t_frag.h10
-rw-r--r--include/linux/netfilter_ipv6/ip6t_hl.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_ipv6header.h8
-rw-r--r--include/linux/netfilter_ipv6/ip6t_mh.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_opts.h12
-rw-r--r--include/linux/netfilter_ipv6/ip6t_rt.h13
-rw-r--r--include/linux/pkt_sched.h41
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h4
-rw-r--r--include/linux/rtc.h4
-rw-r--r--include/linux/skbuff.h11
-rw-r--r--include/linux/virtio_config.h5
-rw-r--r--include/media/mt9v011.h17
-rw-r--r--include/media/rc-core.h3
-rw-r--r--include/media/saa7146.h2
-rw-r--r--include/media/v4l2-common.h13
-rw-r--r--include/media/v4l2-ctrls.h7
-rw-r--r--include/media/v4l2-subdev.h23
-rw-r--r--include/net/dst.h117
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/inet_sock.h8
-rw-r--r--include/net/inetpeer.h13
-rw-r--r--include/net/ip_fib.h23
-rw-r--r--include/net/ip_vs.h297
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h23
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h12
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h10
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h6
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h65
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_core.h4
-rw-r--r--include/net/netlink.h9
-rw-r--r--include/net/netns/conntrack.h4
-rw-r--r--include/net/netns/ip_vs.h143
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sch_generic.h67
-rw-r--r--include/net/sctp/user.h1
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/net/udp.h2
-rw-r--r--init/Kconfig60
-rw-r--r--init/main.c13
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/irq/Kconfig3
-rw-r--r--kernel/irq/handle.c111
-rw-r--r--kernel/irq/manage.c82
-rw-r--r--kernel/lockdep.c18
-rw-r--r--kernel/params.c65
-rw-r--r--kernel/perf_event.c46
-rw-r--r--kernel/sched.c26
-rw-r--r--kernel/sched_autogroup.c32
-rw-r--r--kernel/sched_autogroup.h4
-rw-r--r--kernel/sched_debug.c42
-rw-r--r--kernel/sched_fair.c113
-rw-r--r--kernel/smp.c62
-rw-r--r--kernel/time/tick-sched.c7
-rw-r--r--kernel/trace/trace_irqsoff.c8
-rw-r--r--kernel/workqueue.c20
-rw-r--r--lib/Kconfig4
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/Makefile2
-rw-r--r--lib/cpu_rmap.c269
-rw-r--r--lib/textsearch.c10
-rw-r--r--lib/xz/Kconfig12
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/memblock.c8
-rw-r--r--mm/memcontrol.c190
-rw-r--r--mm/truncate.c11
-rw-r--r--mm/vmscan.c1
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/9p/trans_rdma.c1
-rw-r--r--net/Kconfig6
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/aggregation.c2
-rw-r--r--net/batman-adv/aggregation.h2
-rw-r--r--net/batman-adv/bat_debugfs.c6
-rw-r--r--net/batman-adv/bat_debugfs.h2
-rw-r--r--net/batman-adv/bat_sysfs.c2
-rw-r--r--net/batman-adv/bat_sysfs.h2
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bitarray.h2
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c2
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/hard-interface.c13
-rw-r--r--net/batman-adv/hard-interface.h6
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h7
-rw-r--r--net/batman-adv/icmp_socket.c2
-rw-r--r--net/batman-adv/icmp_socket.h2
-rw-r--r--net/batman-adv/main.c2
-rw-r--r--net/batman-adv/main.h23
-rw-r--r--net/batman-adv/originator.c4
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/packet.h17
-rw-r--r--net/batman-adv/ring_buffer.c2
-rw-r--r--net/batman-adv/ring_buffer.h2
-rw-r--r--net/batman-adv/routing.c26
-rw-r--r--net/batman-adv/routing.h5
-rw-r--r--net/batman-adv/send.c6
-rw-r--r--net/batman-adv/send.h2
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/batman-adv/translation-table.h2
-rw-r--r--net/batman-adv/types.h6
-rw-r--r--net/batman-adv/unicast.c21
-rw-r--r--net/batman-adv/unicast.h25
-rw-r--r--net/batman-adv/vis.c16
-rw-r--r--net/batman-adv/vis.h2
-rw-r--r--net/bridge/br_if.c4
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/bridge/netfilter/ebt_ip6.c46
-rw-r--r--net/bridge/netfilter/ebtables.c1
-rw-r--r--net/caif/cfcnfg.c11
-rw-r--r--net/caif/cfdgml.c1
-rw-r--r--net/caif/cfserl.c1
-rw-r--r--net/caif/cfutill.c2
-rw-r--r--net/caif/cfveil.c2
-rw-r--r--net/can/bcm.c3
-rw-r--r--net/can/raw.c3
-rw-r--r--net/core/dev.c261
-rw-r--r--net/core/dst.c39
-rw-r--r--net/core/ethtool.c4
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/neighbour.c13
-rw-r--r--net/core/net-sysfs.c17
-rw-r--r--net/core/pktgen.c234
-rw-r--r--net/core/rtnetlink.c51
-rw-r--r--net/core/skbuff.c13
-rw-r--r--net/dcb/dcbnl.c13
-rw-r--r--net/dccp/ccids/ccid2.c9
-rw-r--r--net/decnet/dn_route.c18
-rw-r--r--net/decnet/dn_table.c1
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/econet/af_econet.c4
-rw-r--r--net/ipv4/Kconfig42
-rw-r--r--net/ipv4/Makefile4
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/arp.c11
-rw-r--r--net/ipv4/fib_frontend.c23
-rw-r--r--net/ipv4/fib_hash.c1133
-rw-r--r--net/ipv4/fib_lookup.h2
-rw-r--r--net/ipv4/fib_rules.c10
-rw-r--r--net/ipv4/fib_semantics.c125
-rw-r--r--net/ipv4/fib_trie.c207
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inetpeer.c3
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ipmr.c46
-rw-r--r--net/ipv4/netfilter/Kconfig3
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c7
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c3
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c33
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c9
-rw-r--r--net/ipv4/raw.c19
-rw-r--r--net/ipv4/route.c99
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_policy.c4
-rw-r--r--net/ipv6/addrconf.c84
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c3
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c3
-rw-r--r--net/ipv6/raw.c14
-rw-r--r--net/ipv6/route.c57
-rw-r--r--net/ipv6/sit.c23
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/xfrm6_policy.c8
-rw-r--r--net/mac80211/Kconfig6
-rw-r--r--net/netfilter/Kconfig66
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/core.c20
-rw-r--r--net/netfilter/ipset/Kconfig121
-rw-r--r--net/netfilter/ipset/Makefile24
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c587
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c652
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c515
-rw-r--r--net/netfilter/ipset/ip_set_core.c1671
-rw-r--r--net/netfilter/ipset/ip_set_getport.c141
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c464
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c544
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c562
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c628
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c458
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c578
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c584
-rw-r--r--net/netfilter/ipset/pfxlen.c291
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c98
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c195
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c376
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c892
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c134
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c61
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c67
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c72
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_pe.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c129
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c45
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c153
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c142
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c110
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c1239
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c26
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c82
-rw-r--r--net/netfilter/nf_conntrack_core.c57
-rw-r--r--net/netfilter/nf_conntrack_expect.c34
-rw-r--r--net/netfilter/nf_conntrack_extend.c11
-rw-r--r--net/netfilter/nf_conntrack_helper.c20
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c74
-rw-r--r--net/netfilter/nf_conntrack_netlink.c53
-rw-r--r--net/netfilter/nf_conntrack_proto.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c14
-rw-r--r--net/netfilter/nf_conntrack_snmp.c77
-rw-r--r--net/netfilter/nf_conntrack_standalone.c45
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c120
-rw-r--r--net/netfilter/nf_log.c6
-rw-r--r--net/netfilter/nf_queue.c82
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netfilter/nfnetlink_queue.c22
-rw-r--r--net/netfilter/x_tables.c98
-rw-r--r--net/netfilter/xt_AUDIT.c204
-rw-r--r--net/netfilter/xt_CLASSIFY.c36
-rw-r--r--net/netfilter/xt_IDLETIMER.c2
-rw-r--r--net/netfilter/xt_LED.c2
-rw-r--r--net/netfilter/xt_NFQUEUE.c34
-rw-r--r--net/netfilter/xt_connlimit.c62
-rw-r--r--net/netfilter/xt_conntrack.c75
-rw-r--r--net/netfilter/xt_cpu.c2
-rw-r--r--net/netfilter/xt_devgroup.c82
-rw-r--r--net/netfilter/xt_iprange.c18
-rw-r--r--net/netfilter/xt_ipvs.c2
-rw-r--r--net/netfilter/xt_set.c359
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rds/rds.h1
-rw-r--r--net/rfkill/Kconfig4
-rw-r--r--net/sched/Kconfig28
-rw-r--r--net/sched/Makefile3
-rw-r--r--net/sched/act_api.c46
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ipt.c16
-rw-r--r--net/sched/act_mirred.c4
-rw-r--r--net/sched/act_nat.c2
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c9
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/cls_api.c33
-rw-r--r--net/sched/cls_basic.c17
-rw-r--r--net/sched/cls_cgroup.c8
-rw-r--r--net/sched/cls_flow.c6
-rw-r--r--net/sched/cls_fw.c38
-rw-r--r--net/sched/cls_route.c126
-rw-r--r--net/sched/cls_rsvp.h95
-rw-r--r--net/sched/cls_tcindex.c2
-rw-r--r--net/sched/cls_u32.c77
-rw-r--r--net/sched/em_cmp.c47
-rw-r--r--net/sched/em_meta.c44
-rw-r--r--net/sched/em_nbyte.c3
-rw-r--r--net/sched/em_text.c3
-rw-r--r--net/sched/em_u32.c2
-rw-r--r--net/sched/ematch.c37
-rw-r--r--net/sched/sch_api.c169
-rw-r--r--net/sched/sch_atm.c16
-rw-r--r--net/sched/sch_cbq.c365
-rw-r--r--net/sched/sch_choke.c677
-rw-r--r--net/sched/sch_drr.c2
-rw-r--r--net/sched/sch_dsmark.c23
-rw-r--r--net/sched/sch_fifo.c27
-rw-r--r--net/sched/sch_generic.c40
-rw-r--r--net/sched/sch_gred.c85
-rw-r--r--net/sched/sch_hfsc.c39
-rw-r--r--net/sched/sch_htb.c118
-rw-r--r--net/sched/sch_mq.c1
-rw-r--r--net/sched/sch_mqprio.c416
-rw-r--r--net/sched/sch_multiq.c10
-rw-r--r--net/sched/sch_netem.c11
-rw-r--r--net/sched/sch_prio.c36
-rw-r--r--net/sched/sch_red.c72
-rw-r--r--net/sched/sch_sfq.c72
-rw-r--r--net/sched/sch_tbf.c41
-rw-r--r--net/sched/sch_teql.c39
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/unix/af_unix.c66
-rw-r--r--net/wanrouter/wanmain.c2
-rw-r--r--net/wireless/Kconfig2
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--security/keys/Makefile4
-rw-r--r--security/keys/compat.c17
-rw-r--r--security/keys/encrypted.c (renamed from security/keys/encrypted_defined.c)2
-rw-r--r--security/keys/encrypted.h (renamed from security/keys/encrypted_defined.h)0
-rw-r--r--security/keys/gc.c14
-rw-r--r--security/keys/internal.h26
-rw-r--r--security/keys/key.c320
-rw-r--r--security/keys/keyctl.c355
-rw-r--r--security/keys/keyring.c295
-rw-r--r--security/keys/permission.c33
-rw-r--r--security/keys/proc.c17
-rw-r--r--security/keys/process_keys.c135
-rw-r--r--security/keys/request_key.c164
-rw-r--r--security/keys/request_key_auth.c62
-rw-r--r--security/keys/trusted.c (renamed from security/keys/trusted_defined.c)54
-rw-r--r--security/keys/trusted.h (renamed from security/keys/trusted_defined.h)0
-rw-r--r--security/keys/user_defined.c32
-rw-r--r--security/selinux/ss/conditional.c2
-rw-r--r--security/selinux/ss/policydb.c4
-rw-r--r--sound/pci/hda/patch_realtek.c26
-rw-r--r--sound/pci/ice1712/delta.c7
-rw-r--r--sound/soc/blackfin/Kconfig11
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c4
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c10
-rw-r--r--sound/soc/pxa/z2.c3
-rw-r--r--tools/perf/Makefile9
-rw-r--r--tools/perf/builtin-annotate.c6
-rw-r--r--tools/perf/builtin-kmem.c4
-rw-r--r--tools/perf/builtin-lock.c6
-rw-r--r--tools/perf/builtin-record.c2
-rw-r--r--tools/perf/builtin-report.c2
-rw-r--r--tools/perf/builtin-sched.c20
-rw-r--r--tools/perf/builtin-script.c6
-rw-r--r--tools/perf/builtin-stat.c4
-rw-r--r--tools/perf/builtin-test.c54
-rw-r--r--tools/perf/builtin-top.c9
-rw-r--r--tools/perf/util/event.c5
-rw-r--r--tools/perf/util/header.c4
-rw-r--r--tools/perf/util/hist.c17
-rw-r--r--tools/perf/util/include/linux/bitops.h1
-rw-r--r--tools/perf/util/map.c3
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/parse-events.h2
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/session.c28
-rw-r--r--tools/perf/util/svghelper.c9
-rw-r--r--tools/perf/util/symbol.c16
-rw-r--r--tools/perf/util/types.h10
-rw-r--r--tools/perf/util/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/ui/browsers/map.c5
-rw-r--r--tools/perf/util/values.c10
-rw-r--r--usr/Kconfig18
1396 files changed, 33340 insertions, 15703 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-at91 b/Documentation/ABI/testing/sysfs-platform-at91
new file mode 100644
index 000000000000..4cc6a865ae66
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-at91
@@ -0,0 +1,25 @@
+What: /sys/devices/platform/at91_can/net/<iface>/mb0_id
+Date: January 2011
+KernelVersion: 2.6.38
+Contact: Marc Kleine-Budde <kernel@pengutronix.de>
+Description:
+ Value representing the can_id of mailbox 0.
+
+ Default: 0x7ff (standard frame)
+
+ Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in
+ "AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the
+ contents of mailbox 0 may be sent under certain
+ conditions (even if disabled or in rx mode).
+
+ The workaround in the errata suggests not to use the
+ mailbox and to load it with an unused identifier.
+
+ In order to use an extended can_id add the
+ CAN_EFF_FLAG (0x80000000U) to the can_id. Example:
+
+ - standard id 0x7ff:
+ echo 0x7ff > /sys/class/net/can0/mb0_id
+
+ - extended id 0x1fffffff:
+ echo 0x9fffffff > /sys/class/net/can0/mb0_id
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 35447e081736..36f63d4a0a06 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -217,8 +217,8 @@ X!Isound/sound_firmware.c
<chapter id="uart16x50">
<title>16x50 UART Driver</title>
!Iinclude/linux/serial_core.h
-!Edrivers/serial/serial_core.c
-!Edrivers/serial/8250.c
+!Edrivers/tty/serial/serial_core.c
+!Edrivers/tty/serial/8250.c
</chapter>
<chapter id="fbdev">
diff --git a/Documentation/DocBook/dvb/dvbapi.xml b/Documentation/DocBook/dvb/dvbapi.xml
index e3a97fdd62a6..ad8678d48916 100644
--- a/Documentation/DocBook/dvb/dvbapi.xml
+++ b/Documentation/DocBook/dvb/dvbapi.xml
@@ -28,7 +28,7 @@
<holder>Convergence GmbH</holder>
</copyright>
<copyright>
- <year>2009-2010</year>
+ <year>2009-2011</year>
<holder>Mauro Carvalho Chehab</holder>
</copyright>
diff --git a/Documentation/DocBook/media.tmpl b/Documentation/DocBook/media.tmpl
index f11048d4053f..a99088aae1aa 100644
--- a/Documentation/DocBook/media.tmpl
+++ b/Documentation/DocBook/media.tmpl
@@ -28,7 +28,7 @@
<title>LINUX MEDIA INFRASTRUCTURE API</title>
<copyright>
- <year>2009-2010</year>
+ <year>2009-2011</year>
<holder>LinuxTV Developers</holder>
</copyright>
@@ -86,7 +86,7 @@ Foundation. A copy of the license is included in the chapter entitled
</author>
</authorgroup>
<copyright>
- <year>2009-2010</year>
+ <year>2009-2011</year>
<holder>Mauro Carvalho Chehab</holder>
</copyright>
diff --git a/Documentation/DocBook/v4l/dev-rds.xml b/Documentation/DocBook/v4l/dev-rds.xml
index 360d2737e649..2427f54397e7 100644
--- a/Documentation/DocBook/v4l/dev-rds.xml
+++ b/Documentation/DocBook/v4l/dev-rds.xml
@@ -75,6 +75,7 @@ as follows:</para>
</section>
<section>
+ <title>RDS datastructures</title>
<table frame="none" pgwide="1" id="v4l2-rds-data">
<title>struct
<structname>v4l2_rds_data</structname></title>
@@ -129,10 +130,11 @@ as follows:</para>
<table frame="none" pgwide="1" id="v4l2-rds-block-codes">
<title>Block defines</title>
- <tgroup cols="3">
+ <tgroup cols="4">
<colspec colname="c1" colwidth="1*" />
<colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="5*" />
+ <colspec colname="c3" colwidth="1*" />
+ <colspec colname="c4" colwidth="5*" />
<tbody valign="top">
<row>
<entry>V4L2_RDS_BLOCK_MSK</entry>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 839e93e875ae..9288af96de34 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -100,6 +100,7 @@ Remote Controller chapter.</contrib>
<year>2008</year>
<year>2009</year>
<year>2010</year>
+ <year>2011</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
</copyright>
@@ -381,7 +382,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 2.6.33</subtitle>
+ <subtitle>Revision 2.6.38</subtitle>
<chapter id="common">
&sub-common;
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 8c594c45b6a1..ccb6048415b2 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -357,14 +357,6 @@ Who: Dave Jones <davej@redhat.com>, Matthew Garrett <mjg@redhat.com>
-----------------------------
-What: __do_IRQ all in one fits nothing interrupt handler
-When: 2.6.32
-Why: __do_IRQ was kept for easy migration to the type flow handlers.
- More than two years of migration time is enough.
-Who: Thomas Gleixner <tglx@linutronix.de>
-
------------------------------
-
What: fakephp and associated sysfs files in /sys/bus/pci/slots/
When: 2011
Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -611,3 +603,10 @@ Why: The adm9240, w83792d and w83793 hardware monitoring drivers have
Who: Jean Delvare <khali@linux-fr.org>
----------------------------
+
+What: xt_connlimit rev 0
+When: 2012
+Who: Jan Engelhardt <jengelh@medozas.de>
+Files: net/netfilter/xt_connlimit.c
+
+----------------------------
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index dc73bc54cc4e..d9da7e148538 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -39,6 +39,9 @@
#include <limits.h>
#include <stddef.h>
#include <signal.h>
+#include <pwd.h>
+#include <grp.h>
+
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>
#include <linux/virtio_blk.h>
@@ -298,20 +301,27 @@ static void *map_zeroed_pages(unsigned int num)
/*
* We use a private mapping (ie. if we write to the page, it will be
- * copied).
+ * copied). We allocate two extra PROT_NONE pages to act as guard
+ * pages against read/write attempts that exceed the allocated space.
*/
- addr = mmap(NULL, getpagesize() * num,
- PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
+ addr = mmap(NULL, getpagesize() * (num+2),
+ PROT_NONE, MAP_PRIVATE, fd, 0);
+
if (addr == MAP_FAILED)
err(1, "Mmapping %u pages of /dev/zero", num);
+ if (mprotect(addr + getpagesize(), getpagesize() * num,
+ PROT_READ|PROT_WRITE) == -1)
+ err(1, "mprotect rw %u pages failed", num);
+
/*
* One neat mmap feature is that you can close the fd, and it
* stays mapped.
*/
close(fd);
- return addr;
+ /* Return address after PROT_NONE page */
+ return addr + getpagesize();
}
/* Get some more pages for a device. */
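
The guard-page scheme introduced above can be shown in isolation. The sketch
below is a minimal, self-contained illustration rather than the launcher's
exact code: it maps anonymous memory (MAP_ANONYMOUS instead of /dev/zero,
purely for brevity), keeps the first and last page PROT_NONE, and hands back
only the inner read/write region, so any access that runs past either end
faults immediately.

#include <err.h>
#include <sys/mman.h>
#include <unistd.h>

/* Allocate "num" usable pages surrounded by two PROT_NONE guard pages. */
static void *alloc_guarded_pages(unsigned int num)
{
	size_t page = getpagesize();
	void *base;

	/* Reserve the whole range with no access rights at all. */
	base = mmap(NULL, page * (num + 2), PROT_NONE,
		    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		err(1, "mmapping %u pages", num);

	/* Open up only the inner region; both guard pages stay PROT_NONE. */
	if (mprotect((char *)base + page, page * num,
		     PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages", num);

	/* Callers only ever see the region after the leading guard page. */
	return (char *)base + page;
}
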
@@ -343,7 +353,7 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
* done to it. This allows us to share untouched memory between
* Guests.
*/
- if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC,
+ if (mmap(addr, len, PROT_READ|PROT_WRITE,
MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
return;
@@ -573,10 +583,10 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
unsigned int line)
{
/*
- * We have to separately check addr and addr+size, because size could
- * be huge and addr + size might wrap around.
+ * Check whether the requested address and size exceed the allocated memory,
+ * or addr + size wraps around.
*/
- if (addr >= guest_limit || addr + size >= guest_limit)
+ if ((addr + size) > guest_limit || (addr + size) < addr)
errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
/*
* We return a pointer for the caller's convenience, now we know it's
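
The new bounds check also has to reject sums that wrap around the top of the
address space. The same idiom, written as a standalone helper with
illustrative names (range_ok and limit are not taken from lguest.c):

#include <stdbool.h>

/*
 * True if [addr, addr + size) fits below "limit"; a wrapped sum makes
 * "end" smaller than "addr", which the first comparison catches.
 */
static bool range_ok(unsigned long addr, unsigned long size,
		     unsigned long limit)
{
	unsigned long end = addr + size;

	return end >= addr && end <= limit;
}
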
@@ -1872,6 +1882,8 @@ static struct option opts[] = {
{ "block", 1, NULL, 'b' },
{ "rng", 0, NULL, 'r' },
{ "initrd", 1, NULL, 'i' },
+ { "username", 1, NULL, 'u' },
+ { "chroot", 1, NULL, 'c' },
{ NULL },
};
static void usage(void)
@@ -1894,6 +1906,12 @@ int main(int argc, char *argv[])
/* If they specify an initrd file to load. */
const char *initrd_name = NULL;
+ /* Password structure for initgroups/setres[gu]id */
+ struct passwd *user_details = NULL;
+
+ /* Directory to chroot to */
+ char *chroot_path = NULL;
+
/* Save the args: we "reboot" by execing ourselves again. */
main_args = argv;
@@ -1950,6 +1968,14 @@ int main(int argc, char *argv[])
case 'i':
initrd_name = optarg;
break;
+ case 'u':
+ user_details = getpwnam(optarg);
+ if (!user_details)
+ err(1, "getpwnam failed, incorrect username?");
+ break;
+ case 'c':
+ chroot_path = optarg;
+ break;
default:
warnx("Unknown argument %s", argv[optind]);
usage();
@@ -2021,6 +2047,37 @@ int main(int argc, char *argv[])
/* If we exit via err(), this kills all the threads, restores tty. */
atexit(cleanup_devices);
+ /* If requested, chroot to a directory */
+ if (chroot_path) {
+ if (chroot(chroot_path) != 0)
+ err(1, "chroot(\"%s\") failed", chroot_path);
+
+ if (chdir("/") != 0)
+ err(1, "chdir(\"/\") failed");
+
+ verbose("chroot done\n");
+ }
+
+ /* If requested, drop privileges */
+ if (user_details) {
+ uid_t u;
+ gid_t g;
+
+ u = user_details->pw_uid;
+ g = user_details->pw_gid;
+
+ if (initgroups(user_details->pw_name, g) != 0)
+ err(1, "initgroups failed");
+
+ if (setresgid(g, g, g) != 0)
+ err(1, "setresgid failed");
+
+ if (setresuid(u, u, u) != 0)
+ err(1, "setresuid failed");
+
+ verbose("Dropping privileges completed\n");
+ }
+
/* Finally, run the Guest. This doesn't return. */
run_guest();
}
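
Taken together, the new --chroot and --username options follow the
conventional order for confining a process: chroot and chdir first, then drop
supplementary groups, the gid and finally the uid, aborting on any failure.
Below is a condensed sketch of that ordering, not the launcher's code itself;
the confine() helper is illustrative and assumes a struct passwd already
obtained via getpwnam().

#define _GNU_SOURCE		/* for setresgid()/setresuid() */
#include <err.h>
#include <grp.h>
#include <pwd.h>
#include <unistd.h>

/* Optionally chroot to "path", then drop all privileges to "pw". */
static void confine(const char *path, const struct passwd *pw)
{
	if (path) {
		if (chroot(path) != 0)
			err(1, "chroot(\"%s\") failed", path);
		if (chdir("/") != 0)
			err(1, "chdir(\"/\") failed");
	}

	if (pw) {
		/* Supplementary groups first: this still needs privilege. */
		if (initgroups(pw->pw_name, pw->pw_gid) != 0)
			err(1, "initgroups failed");
		if (setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) != 0)
			err(1, "setresgid failed");
		/* The uid goes last so the calls above can still succeed. */
		if (setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid) != 0)
			err(1, "setresuid failed");
	}
}
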
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index 6ccaf8e1a00e..dad99978a6a8 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -117,6 +117,11 @@ Running Lguest:
for general information on how to get bridging to work.
+- Random number generation. Using the --rng option will provide a
+ /dev/hwrng in the guest that will read from the host's /dev/random.
+ Use this option in conjunction with rng-tools (see ../hw_random.txt)
+ to provide entropy to the guest kernel's /dev/random.
+
There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest
Good luck!
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 77f0cdd5b0dd..18afcd8afd51 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
BATMAN-ADV
----------
@@ -67,15 +67,16 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms bonding fragmentation orig_interval
-# vis_mode
+# aggregated_ogms gw_bandwidth hop_penalty
+# bonding gw_mode orig_interval
+# fragmentation gw_sel_class vis_mode
There is a special folder for debugging informations:
# ls /sys/kernel/debug/batman_adv/bat0/
-# originators socket transtable_global transtable_local
-# vis_data
+# gateways socket transtable_global vis_data
+# originators softif_neigh transtable_local
Some of the files contain all sort of status information regard-
@@ -230,9 +231,8 @@ CONTACT
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
- (optional subscription at
- https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
+ at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5dc638791d97..25d2f4141d27 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -49,7 +49,8 @@ Table of Contents
3.3 Configuring Bonding Manually with Ifenslave
3.3.1 Configuring Multiple Bonds Manually
3.4 Configuring Bonding Manually via Sysfs
-3.5 Overriding Configuration for Special Cases
+3.5 Configuration with Interfaces Support
+3.6 Overriding Configuration for Special Cases
4. Querying Bonding Configuration
4.1 Bonding Configuration
@@ -161,8 +162,8 @@ onwards) do not have /usr/include/linux symbolically linked to the
default kernel source include directory.
SECOND IMPORTANT NOTE:
- If you plan to configure bonding using sysfs, you do not need
-to use ifenslave.
+ If you plan to configure bonding using sysfs or using the
+/etc/network/interfaces file, you do not need to use ifenslave.
2. Bonding Driver Options
=========================
@@ -779,22 +780,26 @@ resend_igmp
You can configure bonding using either your distro's network
initialization scripts, or manually using either ifenslave or the
-sysfs interface. Distros generally use one of two packages for the
-network initialization scripts: initscripts or sysconfig. Recent
-versions of these packages have support for bonding, while older
+sysfs interface. Distros generally use one of three packages for the
+network initialization scripts: initscripts, sysconfig or interfaces.
+Recent versions of these packages have support for bonding, while older
versions do not.
We will first describe the options for configuring bonding for
-distros using versions of initscripts and sysconfig with full or
-partial support for bonding, then provide information on enabling
+distros using versions of initscripts, sysconfig and interfaces with full
+or partial support for bonding, then provide information on enabling
bonding without support from the network initialization scripts (i.e.,
older versions of initscripts or sysconfig).
- If you're unsure whether your distro uses sysconfig or
-initscripts, or don't know if it's new enough, have no fear.
+ If you're unsure whether your distro uses sysconfig,
+initscripts or interfaces, or don't know if it's new enough, have no fear.
Determining this is fairly straightforward.
- First, issue the command:
+ First, look for a file called interfaces in the /etc/network directory.
+If this file is present on your system, then your system uses interfaces. See
+Configuration with Interfaces Support.
+
+ Else, issue the command:
$ rpm -qf /sbin/ifup
@@ -1327,8 +1332,62 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval
echo +eth2 > /sys/class/net/bond1/bonding/slaves
echo +eth3 > /sys/class/net/bond1/bonding/slaves
-3.5 Overriding Configuration for Special Cases
+3.5 Configuration with Interfaces Support
+-----------------------------------------
+
+ This section applies to distros which use the /etc/network/interfaces file
+to describe network interface configuration, most notably Debian and its
+derivatives.
+
+ The ifup and ifdown commands on Debian don't support bonding out of
+the box. The ifenslave-2.6 package should be installed to provide bonding
+support. Once installed, this package will provide bond-* options to be used
+in /etc/network/interfaces.
+
+ Note that the ifenslave-2.6 package will load the bonding module and use
+the ifenslave command when appropriate.
+
+Example Configurations
+----------------------
+
+In /etc/network/interfaces, the following stanza will configure bond0, in
+active-backup mode, with eth0 and eth1 as slaves.
+
+auto bond0
+iface bond0 inet dhcp
+ bond-slaves eth0 eth1
+ bond-mode active-backup
+ bond-miimon 100
+ bond-primary eth0 eth1
+
+If the above configuration doesn't work, you might have a system using
+upstart for system startup. This is most notably true for recent
+Ubuntu versions. The following stanza in /etc/network/interfaces will
+produce the same result on those systems.
+
+auto bond0
+iface bond0 inet dhcp
+ bond-slaves none
+ bond-mode active-backup
+ bond-miimon 100
+
+auto eth0
+iface eth0 inet manual
+ bond-master bond0
+ bond-primary eth0 eth1
+
+auto eth1
+iface eth1 inet manual
+ bond-master bond0
+ bond-primary eth0 eth1
+
+For a full list of bond-* supported options in /etc/network/interfaces and some
+more advanced examples tailored to your particular distro, see the files in
+/usr/share/doc/ifenslave-2.6.
+
+3.6 Overriding Configuration for Special Cases
----------------------------------------------
+
When using the bonding driver, the physical port which transmits a frame is
typically selected by the bonding driver, and is not relevant to the user or
system administrator. The output port is simply selected using the policies of
diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt
index 37ba3a72cb76..bce23a4a7875 100644
--- a/Documentation/sound/alsa/soc/codec.txt
+++ b/Documentation/sound/alsa/soc/codec.txt
@@ -27,42 +27,38 @@ ASoC Codec driver breakdown
1 - Codec DAI and PCM configuration
-----------------------------------
-Each codec driver must have a struct snd_soc_codec_dai to define its DAI and
+Each codec driver must have a struct snd_soc_dai_driver to define its DAI and
PCM capabilities and operations. This struct is exported so that it can be
registered with the core by your machine driver.
e.g.
-struct snd_soc_codec_dai wm8731_dai = {
- .name = "WM8731",
- /* playback capabilities */
+static struct snd_soc_dai_ops wm8731_dai_ops = {
+ .prepare = wm8731_pcm_prepare,
+ .hw_params = wm8731_hw_params,
+ .shutdown = wm8731_shutdown,
+ .digital_mute = wm8731_mute,
+ .set_sysclk = wm8731_set_dai_sysclk,
+ .set_fmt = wm8731_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver wm8731_dai = {
+ .name = "wm8731-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = WM8731_RATES,
.formats = WM8731_FORMATS,},
- /* capture capabilities */
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM8731_RATES,
.formats = WM8731_FORMATS,},
- /* pcm operations - see section 4 below */
- .ops = {
- .prepare = wm8731_pcm_prepare,
- .hw_params = wm8731_hw_params,
- .shutdown = wm8731_shutdown,
- },
- /* DAI operations - see DAI.txt */
- .dai_ops = {
- .digital_mute = wm8731_mute,
- .set_sysclk = wm8731_set_dai_sysclk,
- .set_fmt = wm8731_set_dai_fmt,
- }
+ .ops = &wm8731_dai_ops,
+ .symmetric_rates = 1,
};
-EXPORT_SYMBOL_GPL(wm8731_dai);
2 - Codec control IO
@@ -186,13 +182,14 @@ when the mute is applied or freed.
i.e.
-static int wm8974_mute(struct snd_soc_codec *codec,
- struct snd_soc_codec_dai *dai, int mute)
+static int wm8974_mute(struct snd_soc_dai *dai, int mute)
{
- u16 mute_reg = wm8974_read_reg_cache(codec, WM8974_DAC) & 0xffbf;
- if(mute)
- wm8974_write(codec, WM8974_DAC, mute_reg | 0x40);
+ struct snd_soc_codec *codec = dai->codec;
+ u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
+
+ if (mute)
+ snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
else
- wm8974_write(codec, WM8974_DAC, mute_reg);
+ snd_soc_write(codec, WM8974_DAC, mute_reg);
return 0;
}
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 2524c75557df..3e2ec9cbf397 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -12,6 +12,8 @@ the following struct:-
struct snd_soc_card {
char *name;
+ ...
+
int (*probe)(struct platform_device *pdev);
int (*remove)(struct platform_device *pdev);
@@ -22,12 +24,13 @@ struct snd_soc_card {
int (*resume_pre)(struct platform_device *pdev);
int (*resume_post)(struct platform_device *pdev);
- /* machine stream operations */
- struct snd_soc_ops *ops;
+ ...
/* CPU <--> Codec DAI links */
struct snd_soc_dai_link *dai_link;
int num_links;
+
+ ...
};
probe()/remove()
@@ -42,11 +45,6 @@ of any machine audio tasks that have to be done before or after the codec, DAIs
and DMA is suspended and resumed. Optional.
-Machine operations
-------------------
-The machine specific audio operations can be set here. Again this is optional.
-
-
Machine DAI Configuration
-------------------------
The machine DAI configuration glues all the codec and CPU DAIs together. It can
@@ -61,8 +59,10 @@ struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
static struct snd_soc_dai_link corgi_dai = {
.name = "WM8731",
.stream_name = "WM8731",
- .cpu_dai = &pxa_i2s_dai,
- .codec_dai = &wm8731_dai,
+ .cpu_dai_name = "pxa-is2-dai",
+ .codec_dai_name = "wm8731-hifi",
+ .platform_name = "pxa-pcm-audio",
+ .codec_name = "wm8713-codec.0-001a",
.init = corgi_wm8731_init,
.ops = &corgi_ops,
};
@@ -77,26 +77,6 @@ static struct snd_soc_card snd_soc_corgi = {
};
-Machine Audio Subsystem
------------------------
-
-The machine soc device glues the platform, machine and codec driver together.
-Private data can also be set here. e.g.
-
-/* corgi audio private data */
-static struct wm8731_setup_data corgi_wm8731_setup = {
- .i2c_address = 0x1b,
-};
-
-/* corgi audio subsystem */
-static struct snd_soc_device corgi_snd_devdata = {
- .machine = &snd_soc_corgi,
- .platform = &pxa2xx_soc_platform,
- .codec_dev = &soc_codec_dev_wm8731,
- .codec_data = &corgi_wm8731_setup,
-};
-
-
Machine Power Map
-----------------
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt
index 06d835987c6a..d57efad37e0a 100644
--- a/Documentation/sound/alsa/soc/platform.txt
+++ b/Documentation/sound/alsa/soc/platform.txt
@@ -20,9 +20,10 @@ struct snd_soc_ops {
int (*trigger)(struct snd_pcm_substream *, int);
};
-The platform driver exports its DMA functionality via struct snd_soc_platform:-
+The platform driver exports its DMA functionality via struct
+snd_soc_platform_driver:-
-struct snd_soc_platform {
+struct snd_soc_platform_driver {
char *name;
int (*probe)(struct platform_device *pdev);
@@ -34,6 +35,13 @@ struct snd_soc_platform {
int (*pcm_new)(struct snd_card *, struct snd_soc_codec_dai *, struct snd_pcm *);
void (*pcm_free)(struct snd_pcm *);
+ /*
+ * For platform caused delay reporting.
+ * Optional.
+ */
+ snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
+ struct snd_soc_dai *);
+
/* platform stream ops */
struct snd_pcm_ops *pcm_ops;
};
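
The delay callback only needs to return, in frames, how much audio is still
queued in the platform's own FIFO/DMA path in addition to what the DAI
reports. A hypothetical, deliberately trivial implementation using the
signature shown above (the fixed FIFO depth is invented for illustration):

#include <sound/pcm.h>
#include <sound/soc.h>

#define EXAMPLE_FIFO_FRAMES	32	/* made-up platform FIFO depth */

static snd_pcm_sframes_t example_platform_delay(struct snd_pcm_substream *substream,
						struct snd_soc_dai *dai)
{
	/* Report the extra latency added by the platform's FIFO. */
	return EXAMPLE_FIFO_FRAMES;
}
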
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 8773778d23fc..881e7f44491b 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -285,6 +285,9 @@ implement g_volatile_ctrl like this:
The 'new value' union is not used in g_volatile_ctrl. In general controls
that need to implement g_volatile_ctrl are read-only controls.
+Note that if one or more controls in a control cluster are marked as volatile,
+then all the controls in the cluster are seen as volatile.
+
To mark a control as volatile you have to set the is_volatile flag:
ctrl = v4l2_ctrl_new_std(&sd->ctrl_handler, ...);
@@ -462,6 +465,15 @@ pointer to the v4l2_ctrl_ops struct that is used for that cluster.
Obviously, all controls in the cluster array must be initialized to either
a valid control or to NULL.
+In rare cases you might want to know which controls of a cluster actually
+were set explicitly by the user. For this you can check the 'is_new' flag of
+each control. For example, in the case of a volume/mute cluster the 'is_new'
+flag of the mute control would be set if the user called VIDIOC_S_CTRL for
+mute only. If the user called VIDIOC_S_EXT_CTRLS for both mute and volume
+controls, then the 'is_new' flag would be 1 for both controls.
+
+The 'is_new' flag is always 1 when called from v4l2_ctrl_handler_setup().
+
VIDIOC_LOG_STATUS Support
=========================
diff --git a/MAINTAINERS b/MAINTAINERS
index 2b35b6c84e2c..424887b4a414 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -162,7 +162,7 @@ L: linux-serial@vger.kernel.org
W: http://serial.sourceforge.net
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F: drivers/serial/8250*
+F: drivers/tty/serial/8250*
F: include/linux/serial_8250.h
8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
@@ -624,11 +624,15 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-ARM/ATMEL AT91RM9200 ARM ARCHITECTURE
+ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
M: Andrew Victor <linux@maxim.org.za>
+M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://maxim.org.za/at91_26.html
-S: Maintained
+W: http://www.linux4sam.org
+S: Supported
+F: arch/arm/mach-at91/
ARM/BCMRING ARM ARCHITECTURE
M: Jiandong Zheng <jdzheng@broadcom.com>
@@ -888,8 +892,8 @@ F: arch/arm/mach-msm/
F: drivers/video/msm/
F: drivers/mmc/host/msm_sdcc.c
F: drivers/mmc/host/msm_sdcc.h
-F: drivers/serial/msm_serial.h
-F: drivers/serial/msm_serial.c
+F: drivers/tty/serial/msm_serial.h
+F: drivers/tty/serial/msm_serial.c
T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
S: Maintained
@@ -1256,7 +1260,7 @@ F: drivers/mmc/host/atmel-mci-regs.h
ATMEL AT91 / AT32 SERIAL DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
-F: drivers/serial/atmel_serial.c
+F: drivers/tty/serial/atmel_serial.c
ATMEL LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1412,7 +1416,7 @@ M: Sonic Zhang <sonic.zhang@analog.com>
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
-F: drivers/serial/bfin_5xx.c
+F: drivers/tty/serial/bfin_5xx.c
BLACKFIN WATCHDOG DRIVER
M: Mike Frysinger <vapier.adi@gmail.com>
@@ -1877,7 +1881,7 @@ L: linux-cris-kernel@axis.com
W: http://developer.axis.com
S: Maintained
F: arch/cris/
-F: drivers/serial/crisv10.*
+F: drivers/tty/serial/crisv10.*
CRYPTO API
M: Herbert Xu <herbert@gondor.apana.org.au>
@@ -2216,7 +2220,7 @@ F: drivers/net/wan/dscc4.c
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
-F: drivers/serial/dz.*
+F: drivers/tty/serial/dz.*
EATA-DMA SCSI DRIVER
M: Michael Neuffer <mike@i-Connect.Net>
@@ -2643,7 +2647,7 @@ FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@freescale.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
-F: drivers/serial/ucc_uart.c
+F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@freescale.com>
@@ -3146,7 +3150,7 @@ S: Orphan
F: drivers/video/imsttfb.c
INFINIBAND SUBSYSTEM
-M: Roland Dreier <rolandd@cisco.com>
+M: Roland Dreier <roland@kernel.org>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
@@ -3349,7 +3353,7 @@ IOC3 SERIAL DRIVER
M: Pat Gefre <pfg@sgi.com>
L: linux-serial@vger.kernel.org
S: Maintained
-F: drivers/serial/ioc3_serial.c
+F: drivers/tty/serial/ioc3_serial.c
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
@@ -3526,7 +3530,7 @@ JSM Neo PCI based serial card
M: Breno Leitao <leitao@linux.vnet.ibm.com>
L: linux-serial@vger.kernel.org
S: Maintained
-F: drivers/serial/jsm/
+F: drivers/tty/serial/jsm/
K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
@@ -3669,6 +3673,28 @@ F: include/linux/key-type.h
F: include/keys/
F: security/keys/
+KEYS-TRUSTED
+M: David Safford <safford@watson.ibm.com>
+M: Mimi Zohar <zohar@us.ibm.com>
+L: linux-security-module@vger.kernel.org
+L: keyrings@linux-nfs.org
+S: Supported
+F: Documentation/keys-trusted-encrypted.txt
+F: include/keys/trusted-type.h
+F: security/keys/trusted.c
+F: security/keys/trusted.h
+
+KEYS-ENCRYPTED
+M: Mimi Zohar <zohar@us.ibm.com>
+M: David Safford <safford@watson.ibm.com>
+L: linux-security-module@vger.kernel.org
+L: keyrings@linux-nfs.org
+S: Supported
+F: Documentation/keys-trusted-encrypted.txt
+F: include/keys/encrypted-type.h
+F: security/keys/encrypted.c
+F: security/keys/encrypted.h
+
KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
W: http://kgdb.wiki.kernel.org/
@@ -3676,7 +3702,7 @@ L: kgdb-bugreport@lists.sourceforge.net
S: Maintained
F: Documentation/DocBook/kgdb.tmpl
F: drivers/misc/kgdbts.c
-F: drivers/serial/kgdboc.c
+F: drivers/tty/serial/kgdboc.c
F: include/linux/kdb.h
F: include/linux/kgdb.h
F: kernel/debug/
@@ -5545,7 +5571,7 @@ M: Pat Gefre <pfg@sgi.com>
L: linux-ia64@vger.kernel.org
S: Supported
F: Documentation/ia64/serial.txt
-F: drivers/serial/ioc?_serial.c
+F: drivers/tty/serial/ioc?_serial.c
F: include/linux/ioc?.h
SGI VISUAL WORKSTATION 320 AND 540
@@ -5567,7 +5593,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
F: arch/arm/mach-lh7a40x/
-F: drivers/serial/serial_lh7a40x.c
+F: drivers/tty/serial/serial_lh7a40x.c
F: drivers/usb/gadget/lh7a40*
F: drivers/usb/host/ohci-lh7a40*
@@ -5787,14 +5813,14 @@ L: sparclinux@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
S: Maintained
-F: drivers/serial/suncore.c
-F: drivers/serial/suncore.h
-F: drivers/serial/sunhv.c
-F: drivers/serial/sunsab.c
-F: drivers/serial/sunsab.h
-F: drivers/serial/sunsu.c
-F: drivers/serial/sunzilog.c
-F: drivers/serial/sunzilog.h
+F: drivers/tty/serial/suncore.c
+F: drivers/tty/serial/suncore.h
+F: drivers/tty/serial/sunhv.c
+F: drivers/tty/serial/sunsab.c
+F: drivers/tty/serial/sunsab.h
+F: drivers/tty/serial/sunsu.c
+F: drivers/tty/serial/sunzilog.c
+F: drivers/tty/serial/sunzilog.h
SPEAR PLATFORM SUPPORT
M: Viresh Kumar <viresh.kumar@st.com>
@@ -6124,8 +6150,8 @@ TTY LAYER
M: Greg Kroah-Hartman <gregkh@suse.de>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F: drivers/char/tty_*
-F: drivers/serial/serial_core.c
+F: drivers/tty/*
+F: drivers/tty/serial/serial_core.c
F: include/linux/serial_core.h
F: include/linux/serial.h
F: include/linux/tty.h
@@ -6870,7 +6896,7 @@ XILINX UARTLITE SERIAL DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
L: linux-serial@vger.kernel.org
S: Maintained
-F: drivers/serial/uartlite.c
+F: drivers/tty/serial/uartlite.c
YAM DRIVER FOR AX.25
M: Jean-Paul Roubelat <jpr@f6fbb.org>
@@ -6916,7 +6942,7 @@ F: drivers/media/video/zoran/
ZS DECSTATION Z85C30 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
-F: drivers/serial/zs.*
+F: drivers/tty/serial/zs.*
GRE DEMULTIPLEXER DRIVER
M: Dmitry Kozlov <xeb@mail.ru>
diff --git a/Makefile b/Makefile
index abb49bf8596e..1f474953427f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index fc95ee1bcf6f..47f63d480141 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -8,6 +8,9 @@ config ALPHA
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select AUTO_IRQ_AFFINITY if SMP
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -68,22 +71,6 @@ config GENERIC_IOMAP
bool
default n
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
-config AUTO_IRQ_AFFINITY
- bool
- depends on SMP
- default y
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
index 2b9cf56db363..212ead354a6b 100644
--- a/arch/arm/configs/ag5evm_defconfig
+++ b/arch/arm/configs/ag5evm_defconfig
@@ -10,7 +10,7 @@ CONFIG_NAMESPACES=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 5536c488dd01..f0dea52e49c4 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -3,7 +3,7 @@ CONFIG_LOCALVERSION="gum"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_EPOLL is not set
# CONFIG_SHMEM is not set
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
index 695e32d4fb58..1b1158ae8f82 100644
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ b/arch/arm/configs/at572d940hfek_defconfig
@@ -17,7 +17,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 3a1ad15a779f..5b54abbeb0b3 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -1,6 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
CONFIG_ARCH_SA1100=y
diff --git a/arch/arm/configs/bcmring_defconfig b/arch/arm/configs/bcmring_defconfig
index 75984cd1e233..795374d48f81 100644
--- a/arch/arm/configs/bcmring_defconfig
+++ b/arch/arm/configs/bcmring_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_HOTPLUG is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index dcfbcf3b6c3e..a93ff8da5bab 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index f52c64e36d8d..2ef2c5e8aaec 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 310f9a6270be..6c56ad086c7c 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
CONFIG_SLOB=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 4a1fa81ed37d..e53c47563845 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index cdc40c4b8c48..88ccde058ba4 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 2519cc5a5f8f..889922ad229c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 9359e1bf32c1..54bf5eec8016 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
index c3194186920c..14559dbb4c2c 100644
--- a/arch/arm/configs/ebsa110_defconfig
+++ b/arch/arm/configs/ebsa110_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_ARCH_EBSA110=y
CONFIG_PCCARD=m
diff --git a/arch/arm/configs/edb7211_defconfig b/arch/arm/configs/edb7211_defconfig
index 7b62be1561ea..d52ded350a12 100644
--- a/arch/arm/configs/edb7211_defconfig
+++ b/arch/arm/configs/edb7211_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_ARCH_CLPS711X=y
CONFIG_ARCH_EDB7211=y
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index d7db34f79702..60a21e01eb70 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 6d6689cdf398..8e97b2f7ceec 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -4,7 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index 1691dea582fe..d68ac67c201c 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index c4eeb6d1cbf0..227a477346ed 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 4f925ead2617..038518ab39a8 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_MODULES=y
CONFIG_ARCH_FOOTBRIDGE=y
diff --git a/arch/arm/configs/fortunet_defconfig b/arch/arm/configs/fortunet_defconfig
index e11c7eab8ed0..840fced7529f 100644
--- a/arch/arm/configs/fortunet_defconfig
+++ b/arch/arm/configs/fortunet_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_ARCH_CLPS711X=y
CONFIG_ARCH_FORTUNET=y
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index ac336f10000c..37903e3f0efc 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -4,7 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index ade55c8c408b..176ec22af034 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -6,7 +6,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp2000_defconfig b/arch/arm/configs/ixp2000_defconfig
index 908324684549..8405aded97a3 100644
--- a/arch/arm/configs/ixp2000_defconfig
+++ b/arch/arm/configs/ixp2000_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp23xx_defconfig b/arch/arm/configs/ixp23xx_defconfig
index 7fc056a8569c..688717612e91 100644
--- a/arch/arm/configs/ixp23xx_defconfig
+++ b/arch/arm/configs/ixp23xx_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 5c5023934001..063e2ab2c8f1 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/loki_defconfig b/arch/arm/configs/loki_defconfig
index e1eaff7f5536..1ba752b2dc6d 100644
--- a/arch/arm/configs/loki_defconfig
+++ b/arch/arm/configs/loki_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/lpd7a400_defconfig b/arch/arm/configs/lpd7a400_defconfig
index 20caaaba4a04..5a48f171204c 100644
--- a/arch/arm/configs/lpd7a400_defconfig
+++ b/arch/arm/configs/lpd7a400_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/lpd7a404_defconfig b/arch/arm/configs/lpd7a404_defconfig
index 1efcce97b4a7..22d0631de009 100644
--- a/arch/arm/configs/lpd7a404_defconfig
+++ b/arch/arm/configs/lpd7a404_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
CONFIG_SLAB=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index af805e8fd03d..a88e64d4e9a5 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -4,7 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index b0d082422d46..7305ebddb510 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
diff --git a/arch/arm/configs/mx1_defconfig b/arch/arm/configs/mx1_defconfig
index 2f38d9715437..b39b5ced8a10 100644
--- a/arch/arm/configs/mx1_defconfig
+++ b/arch/arm/configs/mx1_defconfig
@@ -4,7 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx21_defconfig b/arch/arm/configs/mx21_defconfig
index 6454e18e2abe..411f88dd4402 100644
--- a/arch/arm/configs/mx21_defconfig
+++ b/arch/arm/configs/mx21_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/mx27_defconfig b/arch/arm/configs/mx27_defconfig
index 813cfb366c18..9ad4c656c9bd 100644
--- a/arch/arm/configs/mx27_defconfig
+++ b/arch/arm/configs/mx27_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
index e648ea3429be..7c4b30b34952 100644
--- a/arch/arm/configs/mx3_defconfig
+++ b/arch/arm/configs/mx3_defconfig
@@ -4,7 +4,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 5c7a87260fab..9cba68cfa51a 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_MODULES=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 0e2dc26ebe66..37207d1bf44b 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -7,7 +7,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a350cc6bfe6a..7b63462b349d 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -6,7 +6,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ccedde1371c3..ae890caa17a7 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 439323b3b0ed..a288d7033950 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index 583a0610bd00..2f136c30a989 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -7,7 +7,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
index b42ee62c4d77..c75c9fcede58 100644
--- a/arch/arm/configs/pcontrol_g20_defconfig
+++ b/arch/arm/configs/pcontrol_g20_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index d1efbdc1e6dc..cb08cc561da5 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_SHMEM is not set
CONFIG_MODULES=y
diff --git a/arch/arm/configs/pnx4008_defconfig b/arch/arm/configs/pnx4008_defconfig
index bd481f04276f..35a31ccacc32 100644
--- a/arch/arm/configs/pnx4008_defconfig
+++ b/arch/arm/configs/pnx4008_defconfig
@@ -5,7 +5,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/simpad_defconfig b/arch/arm/configs/simpad_defconfig
index af3b12e3b464..d3358155bf8a 100644
--- a/arch/arm/configs/simpad_defconfig
+++ b/arch/arm/configs/simpad_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="oe1"
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index aebd4bb0ad01..70158273c6dd 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
diff --git a/arch/arm/configs/stmp378x_defconfig b/arch/arm/configs/stmp378x_defconfig
index 94a2d904bf94..1079c2b6eb3a 100644
--- a/arch/arm/configs/stmp378x_defconfig
+++ b/arch/arm/configs/stmp378x_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/stmp37xx_defconfig b/arch/arm/configs/stmp37xx_defconfig
index d8ee58cfa872..564a5cc44085 100644
--- a/arch/arm/configs/stmp37xx_defconfig
+++ b/arch/arm/configs/stmp37xx_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index e89ca19489c2..95c0f0d63db6 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_BUG is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 37f48342827c..3162173fa75a 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -7,7 +7,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index c1c252cdca60..4a5a12681be2 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_AIO is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_MODULES=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 9d7bf5e0d0f5..8b0c717378fa 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=13
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
# CONFIG_SHMEM is not set
CONFIG_SLAB=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 70d47dbae6db..5b5504143647 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SHMEM is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 2e8391307f55..6dde8185205f 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -43,7 +43,7 @@ static const unsigned qsd8x50_surf_smc91x_gpio __initdata = 156;
* at run-time: they vary from board to board, and the true
* configuration won't be known until boot.
*/
-static struct resource smc91x_resources[] __initdata = {
+static struct resource smc91x_resources[] = {
[0] = {
.flags = IORESOURCE_MEM,
},
@@ -52,7 +52,7 @@ static struct resource smc91x_resources[] __initdata = {
},
};
-static struct platform_device smc91x_device __initdata = {
+static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
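A minimal userspace sketch (not the kernel platform-device API) of why the hunk above drops __initdata from smc91x_resources and smc91x_device: a registered descriptor is only stored by pointer and is dereferenced long after registration, so it cannot live in memory that is discarded once init finishes. Names below are hypothetical.

#include <stdio.h>

struct resource { unsigned long start, end, flags; };
struct device_desc { const char *name; const struct resource *res; };

static const struct device_desc *registry[8];
static int nr_registered;

/* register_device() only stores the pointer; it does not copy the data. */
static int register_device(const struct device_desc *d)
{
	if (nr_registered >= 8)
		return -1;
	registry[nr_registered++] = d;
	return 0;
}

/* Must have ordinary static storage: it is dereferenced later at "runtime". */
static const struct resource smc_res = { 0x8000000, 0x80000ff, 1 };
static const struct device_desc smc_dev = { "smc91x", &smc_res };

int main(void)
{
	register_device(&smc_dev);
	printf("%s: %#lx-%#lx\n", registry[0]->name,
	       registry[0]->res->start, registry[0]->res->end);
	return 0;
}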
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 8d2f2daba0c0..e0a028161dde 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -9,6 +9,7 @@ config ARCH_OMAP730
depends on ARCH_OMAP1
bool "OMAP730 Based System"
select CPU_ARM926T
+ select OMAP_MPU_TIMER
select ARCH_OMAP_OTG
config ARCH_OMAP850
@@ -22,6 +23,7 @@ config ARCH_OMAP15XX
default y
bool "OMAP15xx Based System"
select CPU_ARM925T
+ select OMAP_MPU_TIMER
config ARCH_OMAP16XX
depends on ARCH_OMAP1
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 6ee19504845f..ba6009f27677 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,12 +3,11 @@
#
# Common support
-obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
+obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
obj-y += clock.o clock_data.o opp_data.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_OMAP_MPU_TIMER) += time.o
obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
# Power Management
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index ed7a61ff916a..f83fc335c613 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -44,16 +44,21 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/irq.h>
+#include <asm/sched_clock.h>
+
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <plat/common.h>
+#ifdef CONFIG_OMAP_MPU_TIMER
+
#define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE
#define OMAP_MPU_TIMER_OFFSET 0x100
@@ -67,7 +72,7 @@ typedef struct {
((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE + \
(n)*OMAP_MPU_TIMER_OFFSET))
-static inline unsigned long omap_mpu_timer_read(int nr)
+static inline unsigned long notrace omap_mpu_timer_read(int nr)
{
volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr);
return timer->read_tim;
@@ -212,6 +217,32 @@ static struct clocksource clocksource_mpu = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static DEFINE_CLOCK_DATA(cd);
+
+static inline unsigned long long notrace _omap_mpu_sched_clock(void)
+{
+ u32 cyc = mpu_read(&clocksource_mpu);
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+#ifndef CONFIG_OMAP_32K_TIMER
+unsigned long long notrace sched_clock(void)
+{
+ return _omap_mpu_sched_clock();
+}
+#else
+static unsigned long long notrace omap_mpu_sched_clock(void)
+{
+ return _omap_mpu_sched_clock();
+}
+#endif
+
+static void notrace mpu_update_sched_clock(void)
+{
+ u32 cyc = mpu_read(&clocksource_mpu);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
static void __init omap_init_clocksource(unsigned long rate)
{
static char err[] __initdata = KERN_ERR
@@ -219,17 +250,13 @@ static void __init omap_init_clocksource(unsigned long rate)
setup_irq(INT_TIMER2, &omap_mpu_timer2_irq);
omap_mpu_timer_start(1, ~0, 1);
+ init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
if (clocksource_register_hz(&clocksource_mpu, rate))
printk(err, clocksource_mpu.name);
}
-/*
- * ---------------------------------------------------------------------------
- * Timer initialization
- * ---------------------------------------------------------------------------
- */
-static void __init omap_timer_init(void)
+static void __init omap_mpu_timer_init(void)
{
struct clk *ck_ref = clk_get(NULL, "ck_ref");
unsigned long rate;
@@ -246,6 +273,66 @@ static void __init omap_timer_init(void)
omap_init_clocksource(rate);
}
+#else
+static inline void omap_mpu_timer_init(void)
+{
+ pr_err("Bogus timer, should not happen\n");
+}
+#endif /* CONFIG_OMAP_MPU_TIMER */
+
+#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
+static unsigned long long (*preferred_sched_clock)(void);
+
+unsigned long long notrace sched_clock(void)
+{
+ if (!preferred_sched_clock)
+ return 0;
+
+ return preferred_sched_clock();
+}
+
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+ if (use_32k_sched_clock)
+ preferred_sched_clock = omap_32k_sched_clock;
+ else
+ preferred_sched_clock = omap_mpu_sched_clock;
+}
+#else
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+}
+#endif
+
+static inline int omap_32k_timer_usable(void)
+{
+ int res = false;
+
+ if (cpu_is_omap730() || cpu_is_omap15xx())
+ return res;
+
+#ifdef CONFIG_OMAP_32K_TIMER
+ res = omap_32k_timer_init();
+#endif
+
+ return res;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * Timer initialization
+ * ---------------------------------------------------------------------------
+ */
+static void __init omap_timer_init(void)
+{
+ if (omap_32k_timer_usable()) {
+ preferred_sched_clock_init(1);
+ } else {
+ omap_mpu_timer_init();
+ preferred_sched_clock_init(0);
+ }
+}
+
struct sys_timer omap_timer = {
.init = omap_timer_init,
};
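A minimal userspace sketch of the selection pattern introduced above: when both timer backends are compiled in, sched_clock() dispatches through a function pointer that is chosen once at init and returns 0 until a backend is selected. The names and return values below are stand-ins, not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>

static unsigned long long mpu_clock(void) { return 1000; } /* stand-in backend */
static unsigned long long k32_clock(void) { return 2000; } /* stand-in backend */

static unsigned long long (*preferred_clock)(void);

static unsigned long long sched_clock_sketch(void)
{
	if (!preferred_clock)
		return 0;	/* not initialised yet */
	return preferred_clock();
}

static void clock_init(bool use_32k)
{
	preferred_clock = use_32k ? k32_clock : mpu_clock;
}

int main(void)
{
	printf("before init: %llu\n", sched_clock_sketch());
	clock_init(true);
	printf("after init:  %llu\n", sched_clock_sketch());
	return 0;
}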
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 20cfbcc6c60c..13d7b8f145bd 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -52,10 +52,9 @@
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+#include <plat/common.h>
#include <plat/dmtimer.h>
-struct sys_timer omap_timer;
-
/*
* ---------------------------------------------------------------------------
* 32KHz OS timer
@@ -181,14 +180,14 @@ static __init void omap_init_32k_timer(void)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-static void __init omap_timer_init(void)
+bool __init omap_32k_timer_init(void)
{
+ omap_init_clocksource_32k();
+
#ifdef CONFIG_OMAP_DM_TIMER
omap_dm_timer_init();
#endif
omap_init_32k_timer();
-}
-struct sys_timer omap_timer = {
- .init = omap_timer_init,
-};
+ return true;
+}
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 5b0c77732dfc..8f9a64d650ee 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -124,8 +124,9 @@ static inline void cm_t3517_init_hecc(void) {}
#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
#define RTC_IO_GPIO (153)
#define RTC_WR_GPIO (154)
-#define RTC_RD_GPIO (160)
+#define RTC_RD_GPIO (53)
#define RTC_CS_GPIO (163)
+#define RTC_CS_EN_GPIO (160)
struct v3020_platform_data cm_t3517_v3020_pdata = {
.use_gpio = 1,
@@ -145,6 +146,16 @@ static struct platform_device cm_t3517_rtc_device = {
static void __init cm_t3517_init_rtc(void)
{
+ int err;
+
+ err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en");
+ if (err) {
+ pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
+ return;
+ }
+
+ gpio_direction_output(RTC_CS_EN_GPIO, 1);
+
platform_device_register(&cm_t3517_rtc_device);
}
#else
@@ -214,12 +225,12 @@ static struct mtd_partition cm_t3517_nand_partitions[] = {
},
{
.name = "linux",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x2A0000 */
.size = 32 * NAND_BLOCK_SIZE,
},
{
.name = "rootfs",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x6A0000 */
.size = MTDPART_SIZ_FULL,
},
};
@@ -256,11 +267,19 @@ static void __init cm_t3517_init_irq(void)
static struct omap_board_mux board_mux[] __initdata = {
/* GPIO186 - Green LED */
OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
- /* RTC GPIOs: IO, WR#, RD#, CS# */
+
+ /* RTC GPIOs: */
+ /* IO - GPIO153 */
OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ /* WR# - GPIO154 */
OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
- OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ /* RD# - GPIO53 */
+ OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ /* CS# - GPIO163 */
OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+ /* CS EN - GPIO160 */
+ OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
/* HSUSB1 RESET */
OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
/* HSUSB2 RESET */
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 00bb1fc5e017..e906e05bb41b 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -275,8 +275,7 @@ static struct twl4030_gpio_platform_data devkit8000_gpio_data = {
.irq_base = TWL4030_GPIO_IRQ_BASE,
.irq_end = TWL4030_GPIO_IRQ_END,
.use_leds = true,
- .pullups = BIT(1),
- .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+ .pulldowns = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
| BIT(15) | BIT(16) | BIT(17),
.setup = devkit8000_twl_gpio_setup,
};
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index e8cb32fd7f13..de9ec8ddd2ae 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -34,7 +34,6 @@
#include "cm2_44xx.h"
#include "cm-regbits-44xx.h"
#include "prm44xx.h"
-#include "prm44xx.h"
#include "prm-regbits-44xx.h"
#include "control.h"
#include "scrm44xx.h"
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index e20b98636ab4..58e42f76603f 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -423,6 +423,12 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
struct clkdm_dep *cd;
+ if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+ pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+ clkdm1->name, clkdm2->name, __func__);
+ return -EINVAL;
+ }
+
if (!clkdm1 || !clkdm2)
return -EINVAL;
@@ -458,6 +464,12 @@ int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
{
struct clkdm_dep *cd;
+ if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+ pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+ clkdm1->name, clkdm2->name, __func__);
+ return -EINVAL;
+ }
+
if (!clkdm1 || !clkdm2)
return -EINVAL;
@@ -500,6 +512,12 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
if (!clkdm1 || !clkdm2)
return -EINVAL;
+ if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+ pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+ clkdm1->name, clkdm2->name, __func__);
+ return -EINVAL;
+ }
+
cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
if (IS_ERR(cd)) {
pr_debug("clockdomain: hardware cannot set/clear wake up of "
@@ -527,6 +545,12 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
struct clkdm_dep *cd;
u32 mask = 0;
+ if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+ pr_err("clockdomain: %s: %s: not yet implemented\n",
+ clkdm->name, __func__);
+ return -EINVAL;
+ }
+
if (!clkdm)
return -EINVAL;
@@ -830,8 +854,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
* dependency code and data for OMAP4.
*/
if (cpu_is_omap44xx()) {
- WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
- "support is not yet implemented\n");
+ pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
} else {
if (atomic_read(&clkdm->usecount) > 0)
_clkdm_add_autodeps(clkdm);
@@ -872,8 +895,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
* dependency code and data for OMAP4.
*/
if (cpu_is_omap44xx()) {
- WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
- "support is not yet implemented\n");
+ pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
} else {
if (atomic_read(&clkdm->usecount) > 0)
_clkdm_del_autodeps(clkdm);
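A compact sketch of the guard pattern added to the wake-up dependency helpers above: return -EINVAL early on hardware the dependency code does not yet support. In this sketch the argument checks come first so the error message never dereferences a missing clockdomain; names are hypothetical, not the kernel API.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct clockdomain { const char *name; };

static bool cpu_supports_wkdeps(void) { return false; } /* stand-in for the cpu_is_omap*() checks */

static int clkdm_add_wkdep_sketch(struct clockdomain *a, struct clockdomain *b)
{
	if (!a || !b)
		return -EINVAL;

	if (!cpu_supports_wkdeps()) {
		fprintf(stderr, "clockdomain: %s/%s: not yet implemented\n",
			a->name, b->name);
		return -EINVAL;
	}

	/* ... program the wake-up dependency register here ... */
	return 0;
}

int main(void)
{
	struct clockdomain mpu = { "mpu_clkdm" }, per = { "per_clkdm" };
	printf("ret=%d\n", clkdm_add_wkdep_sketch(&mpu, &per));
	return 0;
}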
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 51920fc7fc52..10622c914abc 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -30,8 +30,6 @@
#include "cm1_44xx.h"
#include "cm2_44xx.h"
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
#include "cm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
diff --git a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
index d5233890370c..cf600e22bf8e 100644
--- a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
@@ -19,7 +19,6 @@
#include <plat/prcm.h>
#include "powerdomain.h"
-#include "prm-regbits-34xx.h"
#include "prm.h"
#include "prm-regbits-24xx.h"
#include "prm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 4e48e786bec7..7b7c2683ae7b 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -42,6 +42,8 @@
#include "timer-gp.h"
+#include <plat/common.h>
+
/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
#define MAX_GPTIMER_ID 12
@@ -176,10 +178,14 @@ static void __init omap2_gp_clockevent_init(void)
/*
* When 32k-timer is enabled, don't use GPTimer for clocksource
* instead, just leave default clocksource which uses the 32k
- * sync counter. See clocksource setup in see plat-omap/common.c.
+ * sync counter. See clocksource setup in plat-omap/counter_32k.c
*/
-static inline void __init omap2_gp_clocksource_init(void) {}
+static void __init omap2_gp_clocksource_init(void)
+{
+ omap_init_clocksource_32k();
+}
+
#else
/*
* clocksource
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 18fe3cb195dc..b6333ae3f92a 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -144,12 +144,9 @@ config OMAP_IOMMU_DEBUG
config OMAP_IOMMU_IVA2
bool
-choice
- prompt "System timer"
- default OMAP_32K_TIMER if !ARCH_OMAP15XX
-
config OMAP_MPU_TIMER
bool "Use mpu timer"
+ depends on ARCH_OMAP1
help
Select this option if you want to use the OMAP mpu timer. This
timer provides more intra-tick resolution than the 32KHz timer,
@@ -158,6 +155,7 @@ config OMAP_MPU_TIMER
config OMAP_32K_TIMER
bool "Use 32KHz timer"
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
help
Select this option if you want to enable the OMAP 32KHz timer.
This timer saves power compared to the OMAP_MPU_TIMER, and has
@@ -165,8 +163,6 @@ config OMAP_32K_TIMER
intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
currently only available for OMAP16XX, 24XX, 34XX and OMAP4.
-endchoice
-
config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
depends on ARCH_OMAP3 && PM
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index ea4644021fb9..862dda95d61d 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -36,8 +36,6 @@
#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
-#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
-
#include <linux/clocksource.h>
/*
@@ -122,12 +120,24 @@ static DEFINE_CLOCK_DATA(cd);
#define SC_MULT 4000000000u
#define SC_SHIFT 17
-unsigned long long notrace sched_clock(void)
+static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
u32 cyc = clocksource_32k.read(&clocksource_32k);
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
+#ifndef CONFIG_OMAP_MPU_TIMER
+unsigned long long notrace sched_clock(void)
+{
+ return _omap_32k_sched_clock();
+}
+#else
+unsigned long long notrace omap_32k_sched_clock(void)
+{
+ return _omap_32k_sched_clock();
+}
+#endif
+
static void notrace omap_update_sched_clock(void)
{
u32 cyc = clocksource_32k.read(&clocksource_32k);
@@ -160,7 +170,7 @@ void read_persistent_clock(struct timespec *ts)
*ts = *tsp;
}
-static int __init omap_init_clocksource_32k(void)
+int __init omap_init_clocksource_32k(void)
{
static char err[] __initdata = KERN_ERR
"%s: can't register clocksource!\n";
@@ -195,7 +205,3 @@ static int __init omap_init_clocksource_32k(void)
}
return 0;
}
-arch_initcall(omap_init_clocksource_32k);
-
-#endif /* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
-
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c4b2b478b1a5..85363084cc1a 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -53,7 +53,7 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif
#define OMAP_DMA_ACTIVE 0x01
-#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe
+#define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
@@ -1873,7 +1873,7 @@ static int omap2_dma_handle_ch(int ch)
printk(KERN_INFO "DMA misaligned error with device %d\n",
dma_chan[ch].dev_id);
- p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
+ p->dma_write(status, CSR, ch);
p->dma_write(1 << ch, IRQSTATUS_L0, ch);
/* read back the register to flush the write */
p->dma_read(IRQSTATUS_L0, ch);
@@ -1893,10 +1893,9 @@ static int omap2_dma_handle_ch(int ch)
OMAP_DMA_CHAIN_INCQHEAD(chain_id);
status = p->dma_read(CSR, ch);
+ p->dma_write(status, CSR, ch);
}
- p->dma_write(status, CSR, ch);
-
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, status, dma_chan[ch].data);
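A userspace model of the acknowledge change above: rather than clearing the whole CSR with a wide mask, write back only the status bits that were actually read, so events that race in between the read and the acknowledge are not silently discarded. The register is modelled as a plain variable with write-1-to-clear semantics; this is an illustration, not the OMAP DMA programming model in full.

#include <stdio.h>
#include <stdint.h>

static uint32_t csr = 0x0022;			/* pending events */

static uint32_t csr_read(void)		{ return csr; }
static void     csr_write(uint32_t v)	{ csr &= ~v; }	/* write-1-to-clear */

int main(void)
{
	uint32_t status = csr_read();

	csr |= 0x0100;		/* a new event arrives before the ack */
	csr_write(status);	/* clears only what was handled */

	printf("handled=%#x, still pending=%#x\n",
	       (unsigned int)status, (unsigned int)csr_read());
	return 0;
}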
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 6b8088ec74af..29b2afb4288f 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -35,6 +35,9 @@ struct sys_timer;
extern void omap_map_common_io(void);
extern struct sys_timer omap_timer;
+extern bool omap_32k_timer_init(void);
+extern int __init omap_init_clocksource_32k(void);
+extern unsigned long long notrace omap_32k_sched_clock(void);
extern void omap_reserve(void);
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 313b13073c54..cd2062fe0f61 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -1,8 +1,8 @@
config AVR32
def_bool y
- # With EMBEDDED=n, we get lots of stuff automatically selected
+ # With EXPERT=n, we get lots of stuff automatically selected
# that we usually don't need on AVR32.
- select EMBEDDED
+ select EXPERT
select HAVE_CLK
select HAVE_OPROFILE
select HAVE_KPROBES
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 0a221d48152d..c09577ddc3c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -30,6 +30,9 @@ config BLACKFIN
select HAVE_KERNEL_LZO if RAMKERNEL
select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select IRQ_PER_CPU if SMP
config GENERIC_CSUM
def_bool y
@@ -44,15 +47,6 @@ config ZONE_DMA
config GENERIC_FIND_NEXT_BIT
def_bool y
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_IRQ_PROBE
- def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config GENERIC_GPIO
def_bool y
@@ -254,11 +248,6 @@ config HOTPLUG_CPU
depends on SMP && HOTPLUG
default y
-config IRQ_PER_CPU
- bool
- depends on SMP
- default y
-
config HAVE_LEGACY_PER_CPU_AREA
def_bool y
depends on SMP
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index c0b988ee30df..db8d38a12a9a 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 864af5b68874..3e50d7857c27 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
index 7b6a3370dbe2..362f59dd5228 100644
--- a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
+++ b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
# CONFIG_AIO is not set
CONFIG_SLAB=y
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 4faa6b46a352..023ff0df2692 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 9d893eb68243..4e5a121b3c56 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
index 97a2767c80f8..cd0636bb24a0 100644
--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index f84774360c5b..9f8fc84e4ac9 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0e7262c04cc2..ccc432b722a0 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 4d14a002e7bd..566695472a84 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index fbee9d776f56..ac22124ccb6c 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 05dd11db2f7d..944404b6ff08 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index bcb14d1c5664..b7c8451f26ac 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index 4cf451024fd8..7e67ba31e991 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 843aaa54a9e3..141e5933e1aa 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index dae7adf3b2a2..97ebe09a7370 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -6,7 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index f3414244bfed..c2457543e58c 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 8c7e08f173d4..baf1c1573e5e 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index bd3cb766d078..707cbf8a2590 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 82224f37c04e..4596935eadac 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 433598c6e773..df267588efec 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index ded7d845cb39..6c7b21585a43 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index 0ebc7d9aa426..f50313657f3e 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 700fb701c121..7450127b6455 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index b40156d217e3..5e797cf72043 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index be866d95ed76..a566a2fe6b9b 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index b64bdf759b82..853809510ee9 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index 1bccd9a50986..d496ae9a39b0 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 00ce899e9e5d..65f642167a50 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 613e62831c55..0a7a4c11d8b1 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -54,6 +54,8 @@ config CRIS
bool
default y
select HAVE_IDE
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config HZ
int
@@ -67,10 +69,6 @@ menu "General setup"
source "fs/Kconfig.binfmt"
-config GENERIC_HARDIRQS
- bool
- default y
-
config ETRAX_CMDLINE
string "Kernel command line"
default "root=/dev/mtdblock3"
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index a0c0df8be9c8..7328a7cf7449 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -104,43 +104,21 @@ static void (*interrupt[NR_IRQS])(void) = {
IRQ31_interrupt
};
-static void enable_crisv10_irq(unsigned int irq);
-
-static unsigned int startup_crisv10_irq(unsigned int irq)
-{
- enable_crisv10_irq(irq);
- return 0;
-}
-
-#define shutdown_crisv10_irq disable_crisv10_irq
-
-static void enable_crisv10_irq(unsigned int irq)
-{
- crisv10_unmask_irq(irq);
-}
-
-static void disable_crisv10_irq(unsigned int irq)
-{
- crisv10_mask_irq(irq);
-}
-
-static void ack_crisv10_irq(unsigned int irq)
+static void enable_crisv10_irq(struct irq_data *data)
{
+ crisv10_unmask_irq(data->irq);
}
-static void end_crisv10_irq(unsigned int irq)
+static void disable_crisv10_irq(struct irq_data *data)
{
+ crisv10_mask_irq(data->irq);
}
static struct irq_chip crisv10_irq_type = {
- .name = "CRISv10",
- .startup = startup_crisv10_irq,
- .shutdown = shutdown_crisv10_irq,
- .enable = enable_crisv10_irq,
- .disable = disable_crisv10_irq,
- .ack = ack_crisv10_irq,
- .end = end_crisv10_irq,
- .set_affinity = NULL
+ .name = "CRISv10",
+ .irq_shutdown = disable_crisv10_irq,
+ .irq_enable = enable_crisv10_irq,
+ .irq_disable = disable_crisv10_irq,
};
void weird_irq(void);
@@ -221,7 +199,8 @@ init_IRQ(void)
/* Initialize IRQ handler descriptors. */
for(i = 2; i < NR_IRQS; i++) {
- irq_desc[i].chip = &crisv10_irq_type;
+ set_irq_desc_and_handler(i, &crisv10_irq_type,
+ handle_simple_irq);
set_int_vector(i, interrupt[i]);
}
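A toy model (not the kernel's genirq code) of the conversion pattern used in the CRIS, h8300 and m32r hunks below and above: chip callbacks now take a per-interrupt struct irq_data cookie instead of a bare irq number, and each interrupt gets a chip plus flow handler installed at init. All names here are local to the sketch.

#include <stdio.h>

struct irq_data { unsigned int irq; };

struct irq_chip {
	const char *name;
	void (*irq_enable)(struct irq_data *d);
	void (*irq_disable)(struct irq_data *d);
};

static void toy_unmask(struct irq_data *d) { printf("unmask %u\n", d->irq); }
static void toy_mask(struct irq_data *d)   { printf("mask %u\n", d->irq); }

static const struct irq_chip toy_chip = {
	.name        = "TOY",
	.irq_enable  = toy_unmask,
	.irq_disable = toy_mask,
};

#define NR_IRQS 4
static struct {
	const struct irq_chip *chip;
	struct irq_data data;
} descs[NR_IRQS];

static void set_irq_chip_sketch(unsigned int irq, const struct irq_chip *chip)
{
	descs[irq].chip = chip;
	descs[irq].data.irq = irq;
}

int main(void)
{
	for (unsigned int i = 0; i < NR_IRQS; i++)
		set_irq_chip_sketch(i, &toy_chip);

	descs[2].chip->irq_enable(&descs[2].data);
	descs[2].chip->irq_disable(&descs[2].data);
	return 0;
}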
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 2ed48ae3d313..0ad9db5126c7 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -291,54 +291,33 @@ void crisv32_unmask_irq(int irq)
}
-static unsigned int startup_crisv32_irq(unsigned int irq)
+static void enable_crisv32_irq(struct irq_data *data)
{
- crisv32_unmask_irq(irq);
- return 0;
-}
-
-static void shutdown_crisv32_irq(unsigned int irq)
-{
- crisv32_mask_irq(irq);
+ crisv32_unmask_irq(data->irq);
}
-static void enable_crisv32_irq(unsigned int irq)
+static void disable_crisv32_irq(struct irq_data *data)
{
- crisv32_unmask_irq(irq);
+ crisv32_mask_irq(data->irq);
}
-static void disable_crisv32_irq(unsigned int irq)
-{
- crisv32_mask_irq(irq);
-}
-
-static void ack_crisv32_irq(unsigned int irq)
-{
-}
-
-static void end_crisv32_irq(unsigned int irq)
-{
-}
-
-int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
+static int set_affinity_crisv32_irq(struct irq_data *data,
+ const struct cpumask *dest, bool force)
{
unsigned long flags;
+
spin_lock_irqsave(&irq_lock, flags);
- irq_allocations[irq - FIRST_IRQ].mask = *dest;
+ irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
spin_unlock_irqrestore(&irq_lock, flags);
-
return 0;
}
static struct irq_chip crisv32_irq_type = {
- .name = "CRISv32",
- .startup = startup_crisv32_irq,
- .shutdown = shutdown_crisv32_irq,
- .enable = enable_crisv32_irq,
- .disable = disable_crisv32_irq,
- .ack = ack_crisv32_irq,
- .end = end_crisv32_irq,
- .set_affinity = set_affinity_crisv32_irq
+ .name = "CRISv32",
+ .irq_shutdown = disable_crisv32_irq,
+ .irq_enable = enable_crisv32_irq,
+ .irq_disable = disable_crisv32_irq,
+ .irq_set_affinity = set_affinity_crisv32_irq,
};
void
@@ -472,7 +451,8 @@ init_IRQ(void)
/* Point all IRQ's to bad handlers. */
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
- irq_desc[j].chip = &crisv32_irq_type;
+ set_irq_chip_and_handler(j, &crisv32_irq_type,
+ handle_simple_irq);
set_exception_vector(i, interrupt[j]);
}
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 590f72c9455d..71854d41c5a0 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etrax-100lx_v2_defconfig b/arch/cris/configs/etrax-100lx_v2_defconfig
index 1b2853e39801..a85aabf92be5 100644
--- a/arch/cris/configs/etrax-100lx_v2_defconfig
+++ b/arch/cris/configs/etrax-100lx_v2_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index f73d38cc9c66..87c7227fecb2 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 469f7f9d62e0..c346952f06dc 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -62,7 +62,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -93,8 +93,8 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
printk("do_IRQ: stack overflow: %lX\n", sp);
show_stack(NULL, (unsigned long *)sp);
}
- __do_IRQ(irq);
- irq_exit();
+ generic_handle_irq(irq);
+ irq_exit();
set_irq_regs(old_regs);
}
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index f6bcb039cd6d..747499a1b31e 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -5,6 +5,7 @@ config FRV
select HAVE_ARCH_TRACEHOOK
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select HAVE_GENERIC_HARDIRQS
config ZONE_DMA
bool
@@ -29,14 +30,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default n
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- bool
- default y
-
config TIME_LOW_RES
bool
default y
diff --git a/arch/frv/defconfig b/arch/frv/defconfig
index b8ebe9e8a493..b1b792610fdf 100644
--- a/arch/frv/defconfig
+++ b/arch/frv/defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_MMU=y
CONFIG_FRV_OUTOFLINE_ATOMIC_OPS=y
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 65f897d8c1e9..6df692d1475f 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -2,6 +2,8 @@ config H8300
bool
default y
select HAVE_IDE
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config SYMBOL_PREFIX
string
@@ -47,10 +49,6 @@ config GENERIC_HWEIGHT
bool
default y
-config GENERIC_HARDIRQS
- bool
- default y
-
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
index 342f77765f02..042425a02645 100644
--- a/arch/h8300/defconfig
+++ b/arch/h8300/defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index c25dc2c2b1da..7643d39925d6 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -38,34 +38,30 @@ static inline int is_ext_irq(unsigned int irq)
return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
}
-static void h8300_enable_irq(unsigned int irq)
+static void h8300_enable_irq(struct irq_data *data)
{
- if (is_ext_irq(irq))
- IER_REGS |= 1 << (irq - EXT_IRQ0);
+ if (is_ext_irq(data->irq))
+ IER_REGS |= 1 << (data->irq - EXT_IRQ0);
}
-static void h8300_disable_irq(unsigned int irq)
+static void h8300_disable_irq(struct irq_data *data)
{
- if (is_ext_irq(irq))
- IER_REGS &= ~(1 << (irq - EXT_IRQ0));
+ if (is_ext_irq(data->irq))
+ IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
}
-static void h8300_end_irq(unsigned int irq)
+static unsigned int h8300_startup_irq(struct irq_data *data)
{
-}
-
-static unsigned int h8300_startup_irq(unsigned int irq)
-{
- if (is_ext_irq(irq))
- return h8300_enable_irq_pin(irq);
+ if (is_ext_irq(data->irq))
+ return h8300_enable_irq_pin(data->irq);
else
return 0;
}
-static void h8300_shutdown_irq(unsigned int irq)
+static void h8300_shutdown_irq(struct irq_data *data)
{
- if (is_ext_irq(irq))
- h8300_disable_irq_pin(irq);
+ if (is_ext_irq(data->irq))
+ h8300_disable_irq_pin(data->irq);
}
/*
@@ -73,12 +69,10 @@ static void h8300_shutdown_irq(unsigned int irq)
*/
struct irq_chip h8300irq_chip = {
.name = "H8300-INTC",
- .startup = h8300_startup_irq,
- .shutdown = h8300_shutdown_irq,
- .enable = h8300_enable_irq,
- .disable = h8300_disable_irq,
- .ack = NULL,
- .end = h8300_end_irq,
+ .irq_startup = h8300_startup_irq,
+ .irq_shutdown = h8300_shutdown_irq,
+ .irq_enable = h8300_enable_irq,
+ .irq_disable = h8300_disable_irq,
};
#if defined(CONFIG_RAMKERNEL)
@@ -160,18 +154,14 @@ void __init init_IRQ(void)
setup_vector();
- for (c = 0; c < NR_IRQS; c++) {
- irq_desc[c].status = IRQ_DISABLED;
- irq_desc[c].action = NULL;
- irq_desc[c].depth = 1;
- irq_desc[c].chip = &h8300irq_chip;
- }
+ for (c = 0; c < NR_IRQS; c++)
+ set_irq_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
}
asmlinkage void do_IRQ(int irq)
{
irq_enter();
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irq_exit();
}
@@ -192,7 +182,7 @@ int show_interrupts(struct seq_file *p, void *v)
goto unlock;
seq_printf(p, "%3d: ",i);
seq_printf(p, "%10u ", kstat_irqs(i));
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e0f5b6d7f849..fcf3b437a2d9 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,10 @@ config IA64
select HAVE_KVM
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select GENERIC_PENDING_IRQ if SMP
+ select IRQ_PER_CPU
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -678,28 +682,6 @@ source "arch/ia64/kvm/Kconfig"
source "lib/Kconfig"
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
-config GENERIC_PENDING_IRQ
- bool
- depends on GENERIC_HARDIRQS && SMP
- default y
-
-config IRQ_PER_CPU
- bool
- default y
-
config IOMMU_HELPER
def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 5c291d65196b..ef4c1e442be3 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -7,6 +7,9 @@ config M32R
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_PROBE
config SBUS
bool
@@ -19,14 +22,6 @@ config ZONE_DMA
bool
default y
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_IRQ_PROBE
- bool
- default y
-
config NO_IOPORT
def_bool y
diff --git a/arch/m32r/configs/m32700ut.smp_defconfig b/arch/m32r/configs/m32700ut.smp_defconfig
index 816c3ecaa2aa..a3d727ed6a16 100644
--- a/arch/m32r/configs/m32700ut.smp_defconfig
+++ b/arch/m32r/configs/m32700ut.smp_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/m32700ut.up_defconfig b/arch/m32r/configs/m32700ut.up_defconfig
index 84785686640a..b8334163099d 100644
--- a/arch/m32r/configs/m32700ut.up_defconfig
+++ b/arch/m32r/configs/m32700ut.up_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.nommu_defconfig b/arch/m32r/configs/mappi.nommu_defconfig
index 354a964d084d..7c90ce2fc42b 100644
--- a/arch/m32r/configs/mappi.nommu_defconfig
+++ b/arch/m32r/configs/mappi.nommu_defconfig
@@ -3,7 +3,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.smp_defconfig b/arch/m32r/configs/mappi.smp_defconfig
index 9022307bd073..367d07cebcd3 100644
--- a/arch/m32r/configs/mappi.smp_defconfig
+++ b/arch/m32r/configs/mappi.smp_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.up_defconfig b/arch/m32r/configs/mappi.up_defconfig
index 3726068721a5..cb11384386ce 100644
--- a/arch/m32r/configs/mappi.up_defconfig
+++ b/arch/m32r/configs/mappi.up_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.opsp_defconfig b/arch/m32r/configs/mappi2.opsp_defconfig
index 6136fad048e4..3bff779259b4 100644
--- a/arch/m32r/configs/mappi2.opsp_defconfig
+++ b/arch/m32r/configs/mappi2.opsp_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.vdec2_defconfig b/arch/m32r/configs/mappi2.vdec2_defconfig
index dce1fc7d67ed..75246c9c1af8 100644
--- a/arch/m32r/configs/mappi2.vdec2_defconfig
+++ b/arch/m32r/configs/mappi2.vdec2_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi3.smp_defconfig b/arch/m32r/configs/mappi3.smp_defconfig
index b204e2ecd0f1..27cefd41ac1f 100644
--- a/arch/m32r/configs/mappi3.smp_defconfig
+++ b/arch/m32r/configs/mappi3.smp_defconfig
@@ -5,7 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/oaks32r_defconfig b/arch/m32r/configs/oaks32r_defconfig
index 5aa4ea9ebb10..5087a510ca4f 100644
--- a/arch/m32r/configs/oaks32r_defconfig
+++ b/arch/m32r/configs/oaks32r_defconfig
@@ -2,7 +2,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/opsput_defconfig b/arch/m32r/configs/opsput_defconfig
index 8494c6a276e8..50c6f525db20 100644
--- a/arch/m32r/configs/opsput_defconfig
+++ b/arch/m32r/configs/opsput_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/usrv_defconfig b/arch/m32r/configs/usrv_defconfig
index 1df293bc2ab9..a3cfaaedab60 100644
--- a/arch/m32r/configs/usrv_defconfig
+++ b/arch/m32r/configs/usrv_defconfig
@@ -5,7 +5,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 7db26f1f082d..f745c1287f3a 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,8 +40,10 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ",i);
@@ -51,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, " %14s", desc->irq_data.chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -59,7 +61,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
}
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 402a59d7219b..4a693d02c1e1 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -39,39 +39,30 @@ static void enable_m32104ut_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_m32104ut(unsigned int irq)
+static void mask_m32104ut_irq(struct irq_data *data)
{
- disable_m32104ut_irq(irq);
+ disable_m32104ut_irq(data->irq);
}
-static void end_m32104ut_irq(unsigned int irq)
+static void unmask_m32104ut_irq(struct irq_data *data)
{
- enable_m32104ut_irq(irq);
+ enable_m32104ut_irq(data->irq);
}
-static unsigned int startup_m32104ut_irq(unsigned int irq)
+static void shutdown_m32104ut_irq(struct irq_data *data)
{
- enable_m32104ut_irq(irq);
- return (0);
-}
-
-static void shutdown_m32104ut_irq(unsigned int irq)
-{
- unsigned long port;
+ unsigned int irq = data->irq;
+ unsigned long port = irq2port(irq);
- port = irq2port(irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32104ut_irq_type =
{
- .name = "M32104UT-IRQ",
- .startup = startup_m32104ut_irq,
- .shutdown = shutdown_m32104ut_irq,
- .enable = enable_m32104ut_irq,
- .disable = disable_m32104ut_irq,
- .ack = mask_and_ack_m32104ut,
- .end = end_m32104ut_irq
+ .name = "M32104UT-IRQ",
+ .irq_shutdown = shutdown_m32104ut_irq,
+ .irq_unmask = unmask_m32104ut_irq,
+ .irq_mask = mask_m32104ut_irq,
};
void __init init_IRQ(void)
@@ -85,36 +76,29 @@ void __init init_IRQ(void)
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/
- irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].chip = &m32104ut_irq_type;
- irq_desc[M32R_IRQ_INT0].action = 0;
- irq_desc[M32R_IRQ_INT0].depth = 1;
- icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
+ set_irq_chip_and_handler(M32R_IRQ_INT0, &m32104ut_irq_type,
+ handle_level_irq);
+ /* "H" level sense */
+	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11;
disable_m32104ut_irq(M32R_IRQ_INT0);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &m32104ut_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32104ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_m32104ut_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &m32104ut_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32104ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
disable_m32104ut_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &m32104ut_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32104ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
disable_m32104ut_irq(M32R_IRQ_SIO0_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 80b1a026795a..2074bcc841eb 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -45,39 +45,30 @@ static void enable_m32700ut_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_m32700ut(unsigned int irq)
+static void mask_m32700ut(struct irq_data *data)
{
- disable_m32700ut_irq(irq);
+ disable_m32700ut_irq(data->irq);
}
-static void end_m32700ut_irq(unsigned int irq)
+static void unmask_m32700ut(struct irq_data *data)
{
- enable_m32700ut_irq(irq);
+ enable_m32700ut_irq(data->irq);
}
-static unsigned int startup_m32700ut_irq(unsigned int irq)
-{
- enable_m32700ut_irq(irq);
- return (0);
-}
-
-static void shutdown_m32700ut_irq(unsigned int irq)
+static void shutdown_m32700ut(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_irq_type =
{
- .name = "M32700UT-IRQ",
- .startup = startup_m32700ut_irq,
- .shutdown = shutdown_m32700ut_irq,
- .enable = enable_m32700ut_irq,
- .disable = disable_m32700ut_irq,
- .ack = mask_and_ack_m32700ut,
- .end = end_m32700ut_irq
+ .name = "M32700UT-IRQ",
+ .irq_shutdown = shutdown_m32700ut,
+ .irq_mask = mask_m32700ut,
+ .irq_unmask = unmask_m32700ut
};
/*
@@ -99,7 +90,6 @@ static void disable_m32700ut_pld_irq(unsigned int irq)
unsigned int pldirq;
pldirq = irq2pldirq(irq);
-// disable_m32700ut_irq(M32R_IRQ_INT1);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
outw(data, port);
@@ -111,50 +101,38 @@ static void enable_m32700ut_pld_irq(unsigned int irq)
unsigned int pldirq;
pldirq = irq2pldirq(irq);
-// enable_m32700ut_irq(M32R_IRQ_INT1);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
outw(data, port);
}
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
-{
- disable_m32700ut_pld_irq(irq);
-// mask_and_ack_m32700ut(M32R_IRQ_INT1);
-}
-
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
{
- enable_m32700ut_pld_irq(irq);
- end_m32700ut_irq(M32R_IRQ_INT1);
+ disable_m32700ut_pld_irq(data->irq);
}
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
{
- enable_m32700ut_pld_irq(irq);
- return (0);
+ enable_m32700ut_pld_irq(data->irq);
+ enable_m32700ut_irq(M32R_IRQ_INT1);
}
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld_irq(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2pldirq(irq);
-// shutdown_m32700ut_irq(M32R_IRQ_INT1);
+ pldirq = irq2pldirq(data->irq);
port = pldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_pld_irq_type =
{
- .name = "M32700UT-PLD-IRQ",
- .startup = startup_m32700ut_pld_irq,
- .shutdown = shutdown_m32700ut_pld_irq,
- .enable = enable_m32700ut_pld_irq,
- .disable = disable_m32700ut_pld_irq,
- .ack = mask_and_ack_m32700ut_pld,
- .end = end_m32700ut_pld_irq
+ .name = "M32700UT-PLD-IRQ",
+ .irq_shutdown = shutdown_m32700ut_pld_irq,
+ .irq_mask = mask_m32700ut_pld,
+ .irq_unmask = unmask_m32700ut_pld,
};
/*
@@ -188,42 +166,33 @@ static void enable_m32700ut_lanpld_irq(unsigned int irq)
outw(data, port);
}
-static void mask_and_ack_m32700ut_lanpld(unsigned int irq)
+static void mask_m32700ut_lanpld(struct irq_data *data)
{
- disable_m32700ut_lanpld_irq(irq);
+ disable_m32700ut_lanpld_irq(data->irq);
}
-static void end_m32700ut_lanpld_irq(unsigned int irq)
+static void unmask_m32700ut_lanpld(struct irq_data *data)
{
- enable_m32700ut_lanpld_irq(irq);
- end_m32700ut_irq(M32R_IRQ_INT0);
-}
-
-static unsigned int startup_m32700ut_lanpld_irq(unsigned int irq)
-{
- enable_m32700ut_lanpld_irq(irq);
- return (0);
+ enable_m32700ut_lanpld_irq(data->irq);
+ enable_m32700ut_irq(M32R_IRQ_INT0);
}
-static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lanpld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2lanpldirq(irq);
+ pldirq = irq2lanpldirq(data->irq);
port = lanpldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_lanpld_irq_type =
{
- .name = "M32700UT-PLD-LAN-IRQ",
- .startup = startup_m32700ut_lanpld_irq,
- .shutdown = shutdown_m32700ut_lanpld_irq,
- .enable = enable_m32700ut_lanpld_irq,
- .disable = disable_m32700ut_lanpld_irq,
- .ack = mask_and_ack_m32700ut_lanpld,
- .end = end_m32700ut_lanpld_irq
+ .name = "M32700UT-PLD-LAN-IRQ",
+ .irq_shutdown = shutdown_m32700ut_lanpld,
+ .irq_mask = mask_m32700ut_lanpld,
+ .irq_unmask = unmask_m32700ut_lanpld,
};
/*
@@ -257,143 +226,110 @@ static void enable_m32700ut_lcdpld_irq(unsigned int irq)
outw(data, port);
}
-static void mask_and_ack_m32700ut_lcdpld(unsigned int irq)
+static void mask_m32700ut_lcdpld(struct irq_data *data)
{
- disable_m32700ut_lcdpld_irq(irq);
+ disable_m32700ut_lcdpld_irq(data->irq);
}
-static void end_m32700ut_lcdpld_irq(unsigned int irq)
+static void unmask_m32700ut_lcdpld(struct irq_data *data)
{
- enable_m32700ut_lcdpld_irq(irq);
- end_m32700ut_irq(M32R_IRQ_INT2);
-}
-
-static unsigned int startup_m32700ut_lcdpld_irq(unsigned int irq)
-{
- enable_m32700ut_lcdpld_irq(irq);
- return (0);
+ enable_m32700ut_lcdpld_irq(data->irq);
+ enable_m32700ut_irq(M32R_IRQ_INT2);
}
-static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lcdpld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2lcdpldirq(irq);
+ pldirq = irq2lcdpldirq(data->irq);
port = lcdpldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_lcdpld_irq_type =
{
- .name = "M32700UT-PLD-LCD-IRQ",
- .startup = startup_m32700ut_lcdpld_irq,
- .shutdown = shutdown_m32700ut_lcdpld_irq,
- .enable = enable_m32700ut_lcdpld_irq,
- .disable = disable_m32700ut_lcdpld_irq,
- .ack = mask_and_ack_m32700ut_lcdpld,
- .end = end_m32700ut_lcdpld_irq
+ .name = "M32700UT-PLD-LCD-IRQ",
+ .irq_shutdown = shutdown_m32700ut_lcdpld,
+ .irq_mask = mask_m32700ut_lcdpld,
+ .irq_unmask = unmask_m32700ut_lcdpld,
};
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/
- irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED;
- irq_desc[M32700UT_LAN_IRQ_LAN].chip = &m32700ut_lanpld_irq_type;
- irq_desc[M32700UT_LAN_IRQ_LAN].action = 0;
- irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(M32700UT_LAN_IRQ_LAN,
+ &m32700ut_lanpld_irq_type, handle_level_irq);
lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_m32700ut_irq(M32R_IRQ_MFT2);
/* SIO0 : receive */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_m32700ut_irq(M32R_IRQ_SIO0_R);
/* SIO0 : send */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_m32700ut_irq(M32R_IRQ_SIO0_S);
/* SIO1 : receive */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_m32700ut_irq(M32R_IRQ_SIO1_R);
/* SIO1 : send */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_m32700ut_irq(M32R_IRQ_SIO1_S);
/* DMA1 : */
- irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_DMA1].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_DMA1].action = 0;
- irq_desc[M32R_IRQ_DMA1].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_DMA1, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_DMA1].icucr = 0;
disable_m32700ut_irq(M32R_IRQ_DMA1);
#ifdef CONFIG_SERIAL_M32R_PLDSIO
/* INT#1: SIO0 Receive on PLD */
- irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_RCV].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
- irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV);
/* INT#1: SIO0 Send on PLD */
- irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_SND].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_SIO0_SND].action = 0;
- irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND);
#endif /* CONFIG_SERIAL_M32R_PLDSIO */
/* INT#1: CFC IREQ on PLD */
- irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_CFIREQ].action = 0;
- irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ);
/* INT#1: CFC Insert on PLD */
- irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
- irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT);
/* INT#1: CFC Eject on PLD */
- irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
- irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT);
@@ -413,13 +349,11 @@ void __init init_IRQ(void)
#if defined(CONFIG_USB)
outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
+ set_irq_chip_and_handler(M32700UT_LCD_IRQ_USB_INT1,
+ &m32700ut_lcdpld_irq_type, handle_level_irq);
- irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
- irq_desc[M32700UT_LCD_IRQ_USB_INT1].chip = &m32700ut_lcdpld_irq_type;
- irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0;
- irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1;
- lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
- disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
+ lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
+ disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
#endif
/*
* INT2# is used for BAT, USB, AUDIO
@@ -432,10 +366,8 @@ void __init init_IRQ(void)
/*
* INT3# is used for AR
*/
- irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].chip = &m32700ut_irq_type;
- irq_desc[M32R_IRQ_INT3].action = 0;
- irq_desc[M32R_IRQ_INT3].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT3, &m32700ut_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_m32700ut_irq(M32R_IRQ_INT3);
#endif /* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index ea00c84d6b1b..cdd8c4574027 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -38,40 +38,30 @@ static void enable_mappi_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
{
- disable_mappi_irq(irq);
+ disable_mappi_irq(data->irq);
}
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- enable_mappi_irq(irq);
+ enable_mappi_irq(data->irq);
}
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
- enable_mappi_irq(irq);
- return (0);
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip mappi_irq_type =
{
- .name = "MAPPI-IRQ",
- .startup = startup_mappi_irq,
- .shutdown = shutdown_mappi_irq,
- .enable = enable_mappi_irq,
- .disable = disable_mappi_irq,
- .ack = mask_and_ack_mappi,
- .end = end_mappi_irq
+ .name = "MAPPI-IRQ",
+ .irq_shutdown = shutdown_mappi,
+ .irq_mask = mask_mappi,
+ .irq_unmask = unmask_mappi,
};
void __init init_IRQ(void)
@@ -85,70 +75,54 @@ void __init init_IRQ(void)
#ifdef CONFIG_NE2000
/* INT0 : LAN controller (RTL8019AS) */
- irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_INT0].action = NULL;
- irq_desc[M32R_IRQ_INT0].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
disable_mappi_irq(M32R_IRQ_INT0);
#endif /* CONFIG_M32R_NE2000 */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = NULL;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = NULL;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = NULL;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = NULL;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = NULL;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
#if defined(CONFIG_M32R_PCC)
/* INT1 : pccard0 interrupt */
- irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_INT1].action = NULL;
- irq_desc[M32R_IRQ_INT1].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
disable_mappi_irq(M32R_IRQ_INT1);
/* INT2 : pccard1 interrupt */
- irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT2].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_INT2].action = NULL;
- irq_desc[M32R_IRQ_INT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT2, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
disable_mappi_irq(M32R_IRQ_INT2);
#endif /* CONFIG_M32RPCC */
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index c049376d0270..9117c30ea365 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -46,126 +46,97 @@ static void enable_mappi2_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_mappi2(unsigned int irq)
+static void mask_mappi2(struct irq_data *data)
{
- disable_mappi2_irq(irq);
+ disable_mappi2_irq(data->irq);
}
-static void end_mappi2_irq(unsigned int irq)
+static void unmask_mappi2(struct irq_data *data)
{
- enable_mappi2_irq(irq);
+ enable_mappi2_irq(data->irq);
}
-static unsigned int startup_mappi2_irq(unsigned int irq)
-{
- enable_mappi2_irq(irq);
- return (0);
-}
-
-static void shutdown_mappi2_irq(unsigned int irq)
+static void shutdown_mappi2(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip mappi2_irq_type =
{
- .name = "MAPPI2-IRQ",
- .startup = startup_mappi2_irq,
- .shutdown = shutdown_mappi2_irq,
- .enable = enable_mappi2_irq,
- .disable = disable_mappi2_irq,
- .ack = mask_and_ack_mappi2,
- .end = end_mappi2_irq
+ .name = "MAPPI2-IRQ",
+ .irq_shutdown = shutdown_mappi2,
+ .irq_mask = mask_mappi2,
+ .irq_unmask = unmask_mappi2,
};
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
/* INT0 : LAN controller (SMC91111) */
- irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_INT0].action = 0;
- irq_desc[M32R_IRQ_INT0].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi2_irq(M32R_IRQ_INT0);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi2_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi2_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi2_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi2_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi2_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_M32R_USE_DBG_CONSOLE */
#if defined(CONFIG_USB)
/* INT1 : USB Host controller interrupt */
- irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].chip = &mappi2_irq_type;
- irq_desc[M32R_IRQ_INT1].action = 0;
- irq_desc[M32R_IRQ_INT1].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi2_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
disable_mappi2_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */
/* ICUCR40: CFC IREQ */
- irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].chip = &mappi2_irq_type;
- irq_desc[PLD_IRQ_CFIREQ].action = 0;
- irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi2_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
disable_mappi2_irq(PLD_IRQ_CFIREQ);
#if defined(CONFIG_M32R_CFC)
/* ICUCR41: CFC Insert */
- irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi2_irq_type;
- irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
- irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi2_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
disable_mappi2_irq(PLD_IRQ_CFC_INSERT);
/* ICUCR42: CFC Eject */
- irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].chip = &mappi2_irq_type;
- irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
- irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &mappi2_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi2_irq(PLD_IRQ_CFC_EJECT);
#endif /* CONFIG_MAPPI2_CFC */
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 882de25c6e8c..b44f5ded2bbe 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -46,128 +46,98 @@ static void enable_mappi3_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_mappi3(unsigned int irq)
+static void mask_mappi3(struct irq_data *data)
{
- disable_mappi3_irq(irq);
+ disable_mappi3_irq(data->irq);
}
-static void end_mappi3_irq(unsigned int irq)
+static void unmask_mappi3(struct irq_data *data)
{
- enable_mappi3_irq(irq);
+ enable_mappi3_irq(data->irq);
}
-static unsigned int startup_mappi3_irq(unsigned int irq)
-{
- enable_mappi3_irq(irq);
- return (0);
-}
-
-static void shutdown_mappi3_irq(unsigned int irq)
+static void shutdown_mappi3(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct irq_chip mappi3_irq_type =
-{
- .name = "MAPPI3-IRQ",
- .startup = startup_mappi3_irq,
- .shutdown = shutdown_mappi3_irq,
- .enable = enable_mappi3_irq,
- .disable = disable_mappi3_irq,
- .ack = mask_and_ack_mappi3,
- .end = end_mappi3_irq
+static struct irq_chip mappi3_irq_type = {
+ .name = "MAPPI3-IRQ",
+ .irq_shutdown = shutdown_mappi3,
+ .irq_mask = mask_mappi3,
+ .irq_unmask = unmask_mappi3,
};
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
/* INT0 : LAN controller (SMC91111) */
- irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT0].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_INT0].action = 0;
- irq_desc[M32R_IRQ_INT0].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi3_irq(M32R_IRQ_INT0);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi3_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_M32R_USE_DBG_CONSOLE */
#if defined(CONFIG_USB)
/* INT1 : USB Host controller interrupt */
- irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT1].chip = &mappi3_irq_type;
- irq_desc[M32R_IRQ_INT1].action = 0;
- irq_desc[M32R_IRQ_INT1].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
disable_mappi3_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */
/* CFC IREQ */
- irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].chip = &mappi3_irq_type;
- irq_desc[PLD_IRQ_CFIREQ].action = 0;
- irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
disable_mappi3_irq(PLD_IRQ_CFIREQ);
#if defined(CONFIG_M32R_CFC)
/* ICUCR41: CFC Insert & eject */
- irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi3_irq_type;
- irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
- irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
#endif /* CONFIG_M32R_CFC */
/* IDE IREQ */
- irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_IDEIREQ].chip = &mappi3_irq_type;
- irq_desc[PLD_IRQ_IDEIREQ].action = 0;
- irq_desc[PLD_IRQ_IDEIREQ].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
+ handle_level_irq);
icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi3_irq(PLD_IRQ_IDEIREQ);
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index d11d93bf74f5..19a02db7b818 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -37,39 +37,30 @@ static void enable_oaks32r_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_oaks32r(struct irq_data *data)
{
- disable_oaks32r_irq(irq);
+ disable_oaks32r_irq(data->irq);
}
-static void end_oaks32r_irq(unsigned int irq)
+static void unmask_oaks32r(struct irq_data *data)
{
- enable_oaks32r_irq(irq);
+ enable_oaks32r_irq(data->irq);
}
-static unsigned int startup_oaks32r_irq(unsigned int irq)
-{
- enable_oaks32r_irq(irq);
- return (0);
-}
-
-static void shutdown_oaks32r_irq(unsigned int irq)
+static void shutdown_oaks32r(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip oaks32r_irq_type =
{
- .name = "OAKS32R-IRQ",
- .startup = startup_oaks32r_irq,
- .shutdown = shutdown_oaks32r_irq,
- .enable = enable_oaks32r_irq,
- .disable = disable_oaks32r_irq,
- .ack = mask_and_ack_mappi,
- .end = end_oaks32r_irq
+ .name = "OAKS32R-IRQ",
+ .irq_shutdown = shutdown_oaks32r,
+ .irq_mask = mask_oaks32r,
+ .irq_unmask = unmask_oaks32r,
};
void __init init_IRQ(void)
@@ -83,52 +74,40 @@ void __init init_IRQ(void)
#ifdef CONFIG_NE2000
/* INT3 : LAN controller (RTL8019AS) */
- irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_INT3].action = 0;
- irq_desc[M32R_IRQ_INT3].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT3, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_oaks32r_irq(M32R_IRQ_INT3);
#endif /* CONFIG_M32R_NE2000 */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_oaks32r_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_oaks32r_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_oaks32r_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_oaks32r_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &oaks32r_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &oaks32r_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_oaks32r_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 5f3402a2fbaf..12731547e8bf 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -46,39 +46,30 @@ static void enable_opsput_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_opsput(unsigned int irq)
+static void mask_opsput(struct irq_data *data)
{
- disable_opsput_irq(irq);
+ disable_opsput_irq(data->irq);
}
-static void end_opsput_irq(unsigned int irq)
+static void unmask_opsput(struct irq_data *data)
{
- enable_opsput_irq(irq);
+ enable_opsput_irq(data->irq);
}
-static unsigned int startup_opsput_irq(unsigned int irq)
-{
- enable_opsput_irq(irq);
- return (0);
-}
-
-static void shutdown_opsput_irq(unsigned int irq)
+static void shutdown_opsput(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip opsput_irq_type =
{
- .name = "OPSPUT-IRQ",
- .startup = startup_opsput_irq,
- .shutdown = shutdown_opsput_irq,
- .enable = enable_opsput_irq,
- .disable = disable_opsput_irq,
- .ack = mask_and_ack_opsput,
- .end = end_opsput_irq
+ .name = "OPSPUT-IRQ",
+ .irq_shutdown = shutdown_opsput,
+ .irq_mask = mask_opsput,
+ .irq_unmask = unmask_opsput,
};
/*
@@ -100,7 +91,6 @@ static void disable_opsput_pld_irq(unsigned int irq)
unsigned int pldirq;
pldirq = irq2pldirq(irq);
-// disable_opsput_irq(M32R_IRQ_INT1);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
outw(data, port);
@@ -112,50 +102,38 @@ static void enable_opsput_pld_irq(unsigned int irq)
unsigned int pldirq;
pldirq = irq2pldirq(irq);
-// enable_opsput_irq(M32R_IRQ_INT1);
port = pldirq2port(pldirq);
data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
outw(data, port);
}
-static void mask_and_ack_opsput_pld(unsigned int irq)
-{
- disable_opsput_pld_irq(irq);
-// mask_and_ack_opsput(M32R_IRQ_INT1);
-}
-
-static void end_opsput_pld_irq(unsigned int irq)
+static void mask_opsput_pld(struct irq_data *data)
{
- enable_opsput_pld_irq(irq);
- end_opsput_irq(M32R_IRQ_INT1);
+ disable_opsput_pld_irq(data->irq);
}
-static unsigned int startup_opsput_pld_irq(unsigned int irq)
+static void unmask_opsput_pld(struct irq_data *data)
{
- enable_opsput_pld_irq(irq);
- return (0);
+ enable_opsput_pld_irq(data->irq);
+ enable_opsput_irq(M32R_IRQ_INT1);
}
-static void shutdown_opsput_pld_irq(unsigned int irq)
+static void shutdown_opsput_pld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2pldirq(irq);
-// shutdown_opsput_irq(M32R_IRQ_INT1);
+ pldirq = irq2pldirq(data->irq);
port = pldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip opsput_pld_irq_type =
{
- .name = "OPSPUT-PLD-IRQ",
- .startup = startup_opsput_pld_irq,
- .shutdown = shutdown_opsput_pld_irq,
- .enable = enable_opsput_pld_irq,
- .disable = disable_opsput_pld_irq,
- .ack = mask_and_ack_opsput_pld,
- .end = end_opsput_pld_irq
+ .name = "OPSPUT-PLD-IRQ",
+ .irq_shutdown = shutdown_opsput_pld,
+ .irq_mask = mask_opsput_pld,
+ .irq_unmask = unmask_opsput_pld,
};
/*
@@ -189,42 +167,33 @@ static void enable_opsput_lanpld_irq(unsigned int irq)
outw(data, port);
}
-static void mask_and_ack_opsput_lanpld(unsigned int irq)
-{
- disable_opsput_lanpld_irq(irq);
-}
-
-static void end_opsput_lanpld_irq(unsigned int irq)
+static void mask_opsput_lanpld(struct irq_data *data)
{
- enable_opsput_lanpld_irq(irq);
- end_opsput_irq(M32R_IRQ_INT0);
+ disable_opsput_lanpld_irq(data->irq);
}
-static unsigned int startup_opsput_lanpld_irq(unsigned int irq)
+static void unmask_opsput_lanpld(struct irq_data *data)
{
- enable_opsput_lanpld_irq(irq);
- return (0);
+ enable_opsput_lanpld_irq(data->irq);
+ enable_opsput_irq(M32R_IRQ_INT0);
}
-static void shutdown_opsput_lanpld_irq(unsigned int irq)
+static void shutdown_opsput_lanpld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2lanpldirq(irq);
+ pldirq = irq2lanpldirq(data->irq);
port = lanpldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip opsput_lanpld_irq_type =
{
- .name = "OPSPUT-PLD-LAN-IRQ",
- .startup = startup_opsput_lanpld_irq,
- .shutdown = shutdown_opsput_lanpld_irq,
- .enable = enable_opsput_lanpld_irq,
- .disable = disable_opsput_lanpld_irq,
- .ack = mask_and_ack_opsput_lanpld,
- .end = end_opsput_lanpld_irq
+ .name = "OPSPUT-PLD-LAN-IRQ",
+ .irq_shutdown = shutdown_opsput_lanpld,
+ .irq_mask = mask_opsput_lanpld,
+ .irq_unmask = unmask_opsput_lanpld,
};
/*
@@ -258,143 +227,109 @@ static void enable_opsput_lcdpld_irq(unsigned int irq)
outw(data, port);
}
-static void mask_and_ack_opsput_lcdpld(unsigned int irq)
-{
- disable_opsput_lcdpld_irq(irq);
-}
-
-static void end_opsput_lcdpld_irq(unsigned int irq)
+static void mask_opsput_lcdpld(struct irq_data *data)
{
- enable_opsput_lcdpld_irq(irq);
- end_opsput_irq(M32R_IRQ_INT2);
+ disable_opsput_lcdpld_irq(data->irq);
}
-static unsigned int startup_opsput_lcdpld_irq(unsigned int irq)
+static void unmask_opsput_lcdpld(struct irq_data *data)
{
- enable_opsput_lcdpld_irq(irq);
- return (0);
+ enable_opsput_lcdpld_irq(data->irq);
+ enable_opsput_irq(M32R_IRQ_INT2);
}
-static void shutdown_opsput_lcdpld_irq(unsigned int irq)
+static void shutdown_opsput_lcdpld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2lcdpldirq(irq);
+ pldirq = irq2lcdpldirq(data->irq);
port = lcdpldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct irq_chip opsput_lcdpld_irq_type =
-{
- "OPSPUT-PLD-LCD-IRQ",
- startup_opsput_lcdpld_irq,
- shutdown_opsput_lcdpld_irq,
- enable_opsput_lcdpld_irq,
- disable_opsput_lcdpld_irq,
- mask_and_ack_opsput_lcdpld,
- end_opsput_lcdpld_irq
+static struct irq_chip opsput_lcdpld_irq_type = {
+ .name = "OPSPUT-PLD-LCD-IRQ",
+ .irq_shutdown = shutdown_opsput_lcdpld,
+ .irq_mask = mask_opsput_lcdpld,
+ .irq_unmask = unmask_opsput_lcdpld,
};
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
/* INT#0: LAN controller on OPSPUT-LAN (SMC91C111)*/
- irq_desc[OPSPUT_LAN_IRQ_LAN].status = IRQ_DISABLED;
- irq_desc[OPSPUT_LAN_IRQ_LAN].chip = &opsput_lanpld_irq_type;
- irq_desc[OPSPUT_LAN_IRQ_LAN].action = 0;
- irq_desc[OPSPUT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(OPSPUT_LAN_IRQ_LAN, &opsput_lanpld_irq_type,
+ handle_level_irq);
lanpld_icu_data[irq2lanpldirq(OPSPUT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */
disable_opsput_lanpld_irq(OPSPUT_LAN_IRQ_LAN);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_opsput_irq(M32R_IRQ_MFT2);
/* SIO0 : receive */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_opsput_irq(M32R_IRQ_SIO0_R);
/* SIO0 : send */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_opsput_irq(M32R_IRQ_SIO0_S);
/* SIO1 : receive */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_opsput_irq(M32R_IRQ_SIO1_R);
/* SIO1 : send */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_opsput_irq(M32R_IRQ_SIO1_S);
/* DMA1 : */
- irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_DMA1].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_DMA1].action = 0;
- irq_desc[M32R_IRQ_DMA1].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_DMA1, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_DMA1].icucr = 0;
disable_opsput_irq(M32R_IRQ_DMA1);
#ifdef CONFIG_SERIAL_M32R_PLDSIO
/* INT#1: SIO0 Receive on PLD */
- irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_RCV].chip = &opsput_pld_irq_type;
- irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
- irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &opsput_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
disable_opsput_pld_irq(PLD_IRQ_SIO0_RCV);
/* INT#1: SIO0 Send on PLD */
- irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SIO0_SND].chip = &opsput_pld_irq_type;
- irq_desc[PLD_IRQ_SIO0_SND].action = 0;
- irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &opsput_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
disable_opsput_pld_irq(PLD_IRQ_SIO0_SND);
#endif /* CONFIG_SERIAL_M32R_PLDSIO */
/* INT#1: CFC IREQ on PLD */
- irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFIREQ].chip = &opsput_pld_irq_type;
- irq_desc[PLD_IRQ_CFIREQ].action = 0;
- irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &opsput_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_opsput_pld_irq(PLD_IRQ_CFIREQ);
/* INT#1: CFC Insert on PLD */
- irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_INSERT].chip = &opsput_pld_irq_type;
- irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
- irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &opsput_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */
disable_opsput_pld_irq(PLD_IRQ_CFC_INSERT);
/* INT#1: CFC Eject on PLD */
- irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CFC_EJECT].chip = &opsput_pld_irq_type;
- irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
- irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &opsput_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */
disable_opsput_pld_irq(PLD_IRQ_CFC_EJECT);
@@ -413,14 +348,11 @@ void __init init_IRQ(void)
enable_opsput_irq(M32R_IRQ_INT1);
#if defined(CONFIG_USB)
- outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
-
- irq_desc[OPSPUT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
- irq_desc[OPSPUT_LCD_IRQ_USB_INT1].chip = &opsput_lcdpld_irq_type;
- irq_desc[OPSPUT_LCD_IRQ_USB_INT1].action = 0;
- irq_desc[OPSPUT_LCD_IRQ_USB_INT1].depth = 1;
- lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
- disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
+ outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */
+ set_irq_chip_and_handler(OPSPUT_LCD_IRQ_USB_INT1,
+ &opsput_lcdpld_irq_type, handle_level_irq);
+ lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */
+ disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
#endif
/*
* INT2# is used for BAT, USB, AUDIO
@@ -433,10 +365,8 @@ void __init init_IRQ(void)
/*
* INT3# is used for AR
*/
- irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_INT3].chip = &opsput_irq_type;
- irq_desc[M32R_IRQ_INT3].action = 0;
- irq_desc[M32R_IRQ_INT3].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_INT3, &opsput_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_opsput_irq(M32R_IRQ_INT3);
#endif /* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 1beac7a51ed4..f3cff26d6e74 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -37,39 +37,30 @@ static void enable_mappi_irq(unsigned int irq)
outl(data, port);
}
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
{
- disable_mappi_irq(irq);
+ disable_mappi_irq(data->irq);
}
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
{
- enable_mappi_irq(irq);
+ enable_mappi_irq(data->irq);
}
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
- enable_mappi_irq(irq);
- return 0;
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
{
unsigned long port;
- port = irq2port(irq);
+ port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip mappi_irq_type =
{
- .name = "M32700-IRQ",
- .startup = startup_mappi_irq,
- .shutdown = shutdown_mappi_irq,
- .enable = enable_mappi_irq,
- .disable = disable_mappi_irq,
- .ack = mask_and_ack_mappi,
- .end = end_mappi_irq
+ .name = "M32700-IRQ",
+ .irq_shutdown = shutdown_mappi,
+ .irq_mask = mask_mappi,
+ .irq_unmask = unmask_mappi,
};
/*
@@ -107,42 +98,33 @@ static void enable_m32700ut_pld_irq(unsigned int irq)
outw(data, port);
}
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
{
- disable_m32700ut_pld_irq(irq);
+ disable_m32700ut_pld_irq(data->irq);
}
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
{
- enable_m32700ut_pld_irq(irq);
- end_mappi_irq(M32R_IRQ_INT1);
-}
-
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
-{
- enable_m32700ut_pld_irq(irq);
- return 0;
+ enable_m32700ut_pld_irq(data->irq);
+ enable_mappi_irq(M32R_IRQ_INT1);
}
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld(struct irq_data *data)
{
unsigned long port;
unsigned int pldirq;
- pldirq = irq2pldirq(irq);
+ pldirq = irq2pldirq(data->irq);
port = pldirq2port(pldirq);
outw(PLD_ICUCR_ILEVEL7, port);
}
static struct irq_chip m32700ut_pld_irq_type =
{
- .name = "USRV-PLD-IRQ",
- .startup = startup_m32700ut_pld_irq,
- .shutdown = shutdown_m32700ut_pld_irq,
- .enable = enable_m32700ut_pld_irq,
- .disable = disable_m32700ut_pld_irq,
- .ack = mask_and_ack_m32700ut_pld,
- .end = end_m32700ut_pld_irq
+ .name = "USRV-PLD-IRQ",
+ .irq_shutdown = shutdown_m32700ut_pld,
+ .irq_mask = mask_m32700ut_pld,
+ .irq_unmask = unmask_m32700ut_pld,
};
void __init init_IRQ(void)
@@ -156,53 +138,42 @@ void __init init_IRQ(void)
once++;
/* MFT2 : system timer */
- irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_MFT2].action = 0;
- irq_desc[M32R_IRQ_MFT2].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi_irq(M32R_IRQ_MFT2);
#if defined(CONFIG_SERIAL_M32R_SIO)
/* SIO0_R : uart receive data */
- irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO0_R].action = 0;
- irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
- irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO0_S].action = 0;
- irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
- irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO1_R].action = 0;
- irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
- irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
- irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
- irq_desc[M32R_IRQ_SIO1_S].action = 0;
- irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+ set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+ handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
/* INT#67-#71: CFC#0 IREQ on PLD */
for (i = 0 ; i < CONFIG_M32R_CFC_NUM ; i++ ) {
- irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_CF0 + i].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_CF0 + i].action = 0;
- irq_desc[PLD_IRQ_CF0 + i].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_CF0 + i,
+ &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
= PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i);
@@ -210,19 +181,15 @@ void __init init_IRQ(void)
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
/* INT#76: 16552D#0 IREQ on PLD */
- irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_UART0].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_UART0].action = 0;
- irq_desc[PLD_IRQ_UART0].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_UART0, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
= PLD_ICUCR_ISMOD03; /* 'H' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_UART0);
/* INT#77: 16552D#1 IREQ on PLD */
- irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_UART1].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_UART1].action = 0;
- irq_desc[PLD_IRQ_UART1].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_UART1, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
= PLD_ICUCR_ISMOD03; /* 'H' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_UART1);
@@ -230,10 +197,8 @@ void __init init_IRQ(void)
#if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
/* INT#80: AK4524 IREQ on PLD */
- irq_desc[PLD_IRQ_SNDINT].status = IRQ_DISABLED;
- irq_desc[PLD_IRQ_SNDINT].chip = &m32700ut_pld_irq_type;
- irq_desc[PLD_IRQ_SNDINT].action = 0;
- irq_desc[PLD_IRQ_SNDINT].depth = 1; /* disable nested irq */
+ set_irq_chip_and_handler(PLD_IRQ_SNDINT, &m32700ut_pld_irq_type,
+ handle_level_irq);
pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
= PLD_ICUCR_ISMOD01; /* 'L' level sense */
disable_m32700ut_pld_irq(PLD_IRQ_SNDINT);
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 704e7b92334c..8b9dacaa0f6e 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -2,6 +2,7 @@ config M68K
bool
default y
select HAVE_IDE
+ select HAVE_GENERIC_HARDIRQS
config MMU
bool
@@ -48,14 +49,6 @@ config GENERIC_HWEIGHT
bool
default y
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- bool
- default y
-
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/m68knommu/configs/m5208evb_defconfig b/arch/m68knommu/configs/m5208evb_defconfig
index 6ac2981a2cdf..2f5655c577af 100644
--- a/arch/m68knommu/configs/m5208evb_defconfig
+++ b/arch/m68knommu/configs/m5208evb_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5249evb_defconfig b/arch/m68knommu/configs/m5249evb_defconfig
index 14934ff8d5c3..16df72bfbd45 100644
--- a/arch/m68knommu/configs/m5249evb_defconfig
+++ b/arch/m68knommu/configs/m5249evb_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5272c3_defconfig b/arch/m68knommu/configs/m5272c3_defconfig
index 5985a3b593d8..4e6ea50c7f33 100644
--- a/arch/m68knommu/configs/m5272c3_defconfig
+++ b/arch/m68knommu/configs/m5272c3_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5275evb_defconfig b/arch/m68knommu/configs/m5275evb_defconfig
index 5a7857efb45d..f3dd74115a34 100644
--- a/arch/m68knommu/configs/m5275evb_defconfig
+++ b/arch/m68knommu/configs/m5275evb_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5307c3_defconfig b/arch/m68knommu/configs/m5307c3_defconfig
index e8102018c8d4..bce0a20c3737 100644
--- a/arch/m68knommu/configs/m5307c3_defconfig
+++ b/arch/m68knommu/configs/m5307c3_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5407c3_defconfig b/arch/m68knommu/configs/m5407c3_defconfig
index 5c124a7ba2a7..618cc32691f2 100644
--- a/arch/m68knommu/configs/m5407c3_defconfig
+++ b/arch/m68knommu/configs/m5407c3_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 6ac2981a2cdf..2f5655c577af 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 5f5018a71a3d..31680032053e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -15,6 +15,8 @@ config MICROBLAZE
select TRACING_SUPPORT
select OF
select OF_EARLY_FLATTREE
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
config SWAP
def_bool n
@@ -37,12 +39,6 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_IRQ_PROBE
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -52,9 +48,6 @@ config GENERIC_TIME_VSYSCALL
config GENERIC_CLOCKEVENTS
def_bool y
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config GENERIC_GPIO
def_bool y
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index ab8fbe7ad90b..b3f5eecff2a7 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -7,7 +7,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_HOTPLUG is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index ebc143c5368e..0249e4b7e1d3 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -6,7 +6,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_HOTPLUG is not set
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 548e6cc3bc28..f5ecc0566bc2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -793,9 +793,6 @@ config SCHED_OMIT_FRAME_POINTER
bool
default y
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
#
# Select some configuration options automatically based on user selections.
#
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f437cd1fafb8..5358f90b4dd2 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -7,7 +7,7 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
+ bool "Early printk" if EXPERT
depends on SYS_HAS_EARLY_PRINTK
default y
help
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index c78c7e7e41df..6cd5a519ce5c 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -14,7 +14,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 927d58b2cd03..22fdf2f0cc23 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -21,7 +21,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index b806a4e32896..919005139f5a 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -10,7 +10,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_TINY_RCU=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 9749bc8758db..1cdff6b6327d 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -26,7 +26,7 @@ CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index 502a8e9c084b..5135dc0b950a 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
index 3567b6f07b37..75165dfa60c1 100644
--- a/arch/mips/configs/cavium-octeon_defconfig
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -15,7 +15,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 6c4f7e9d3383..5419adb219a8 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index dda158b2c8dc..4044c9e0fb73 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -11,7 +11,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1100_defconfig b/arch/mips/configs/db1100_defconfig
index 7e4fc76df538..c6b49938ee84 100644
--- a/arch/mips/configs/db1100_defconfig
+++ b/arch/mips/configs/db1100_defconfig
@@ -11,7 +11,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1200_defconfig b/arch/mips/configs/db1200_defconfig
index 6fe205fa7b61..1f69249b839a 100644
--- a/arch/mips/configs/db1200_defconfig
+++ b/arch/mips/configs/db1200_defconfig
@@ -12,7 +12,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1500_defconfig b/arch/mips/configs/db1500_defconfig
index a741c55448d0..b6e21c7cb6bd 100644
--- a/arch/mips/configs/db1500_defconfig
+++ b/arch/mips/configs/db1500_defconfig
@@ -10,7 +10,7 @@ CONFIG_LOCALVERSION="-db1500"
CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1550_defconfig b/arch/mips/configs/db1550_defconfig
index cd32dd8c8008..798a553c9e80 100644
--- a/arch/mips/configs/db1550_defconfig
+++ b/arch/mips/configs/db1550_defconfig
@@ -11,7 +11,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index b15bfd1e69c8..87d0340837aa 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index 0b60c06a943d..0126e66d60cb 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 63944a14b816..e5b73de08fc5 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -17,7 +17,7 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 53edc134f274..48a40aefaf58 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -11,7 +11,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 36de199f4c27..d1606569b001 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -17,7 +17,7 @@ CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 4b16c48b0c36..0e36abcd39cc 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -15,7 +15,7 @@ CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 98f2c7736e87..4dbf6269b3f9 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 5bea99b26fa8..7bbd52194fc3 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 6ae46bcdb20b..92a60aecad5c 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index bf24e9309b9c..db5705e18b36 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -4,7 +4,7 @@ CONFIG_TOSHIBA_JMR3927=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index 6447261c61d0..d9f3db29ab95 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -8,7 +8,7 @@ CONFIG_HZ_1000=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index f7033f3a5822..167c1d07b809 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -21,7 +21,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 9d03b68aece8..7270f3183bda 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -15,7 +15,7 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_PID_NS=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 86bf001babe9..9c9a123016c0 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -9,7 +9,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig
index 4925f507dc21..b5ad7387bbb0 100644
--- a/arch/mips/configs/mipssim_defconfig
+++ b/arch/mips/configs/mipssim_defconfig
@@ -7,7 +7,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index efb779f8f6fe..c16de9812920 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index ab051458452b..d1142e9cd9a1 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -8,7 +8,7 @@ CONFIG_LOCALVERSION="-pmc"
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SHMEM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 814699754e0d..a97a42c6b2c8 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -11,7 +11,7 @@ CONFIG_AUDIT=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/pb1100_defconfig b/arch/mips/configs/pb1100_defconfig
index 1597aa1842fa..75eb1b1f316c 100644
--- a/arch/mips/configs/pb1100_defconfig
+++ b/arch/mips/configs/pb1100_defconfig
@@ -11,7 +11,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1200_defconfig b/arch/mips/configs/pb1200_defconfig
index 96f0d43cf08b..dcbe2704e5ed 100644
--- a/arch/mips/configs/pb1200_defconfig
+++ b/arch/mips/configs/pb1200_defconfig
@@ -12,7 +12,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1500_defconfig b/arch/mips/configs/pb1500_defconfig
index b4bfd4823458..fa00487146f8 100644
--- a/arch/mips/configs/pb1500_defconfig
+++ b/arch/mips/configs/pb1500_defconfig
@@ -11,7 +11,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1550_defconfig b/arch/mips/configs/pb1550_defconfig
index 5a660024d22a..e83d6497e8b4 100644
--- a/arch/mips/configs/pb1550_defconfig
+++ b/arch/mips/configs/pb1550_defconfig
@@ -11,7 +11,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pnx8335-stb225_defconfig b/arch/mips/configs/pnx8335-stb225_defconfig
index 39926a1a96b6..f2925769dfa3 100644
--- a/arch/mips/configs/pnx8335-stb225_defconfig
+++ b/arch/mips/configs/pnx8335-stb225_defconfig
@@ -11,7 +11,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/pnx8550-jbs_defconfig b/arch/mips/configs/pnx8550-jbs_defconfig
index 3376bc8616cc..1d1f2067f3e6 100644
--- a/arch/mips/configs/pnx8550-jbs_defconfig
+++ b/arch/mips/configs/pnx8550-jbs_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/pnx8550-stb810_defconfig b/arch/mips/configs/pnx8550-stb810_defconfig
index 6514f1bf0afb..15c66a571f99 100644
--- a/arch/mips/configs/pnx8550-stb810_defconfig
+++ b/arch/mips/configs/pnx8550-stb810_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
index f1f58e91dd80..3b0b6e8c8533 100644
--- a/arch/mips/configs/powertv_defconfig
+++ b/arch/mips/configs/powertv_defconfig
@@ -14,7 +14,7 @@ CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS_ALL=y
# CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index d6457bc38c71..55902d9cd0f2 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -13,7 +13,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 29acfab31516..9cba856277ff 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -12,7 +12,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 2b3e47653f60..2c0230e76d20 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -12,7 +12,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 64840d717750..5b0463ef9389 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -15,7 +15,7 @@ CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index d9be37fc9cb7..30036b4cbeb1 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 3d25dd08907b..81bfa1d4d8e3 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index be697c9b23c6..c415c4f0e5c2 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index 7ec9287254d8..ee4b2be43c44 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig
index a231b73b1a40..44a451be359e 100644
--- a/arch/mips/configs/wrppmc_defconfig
+++ b/arch/mips/configs/wrppmc_defconfig
@@ -7,7 +7,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_EPOLL is not set
CONFIG_SLAB=y
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
index ab3a3dcec04d..f72d305a3f08 100644
--- a/arch/mips/configs/yosemite_defconfig
+++ b/arch/mips/configs/yosemite_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 8ed41cf2b08d..243bfa23fd58 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
config MN10300
def_bool y
select HAVE_OPROFILE
+ select HAVE_GENERIC_HARDIRQS
config AM33_2
def_bool n
@@ -34,9 +35,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -79,10 +77,6 @@ config QUICKLIST
config ARCH_HAS_ILOG2_U32
def_bool y
-# Use the generic interrupt handling code in kernel/irq/
-config GENERIC_HARDIRQS
- def_bool y
-
config HOTPLUG_CPU
def_bool n
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index 3f749b69ca71..1fd41ec1dfb5 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -4,7 +4,7 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index 83ce2f27b12a..31d76261a3d5 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -15,7 +15,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_RELAY=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 0888675c98dd..fed2946f7335 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -12,7 +12,10 @@ config PARISC
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_HARDIRQS_NO__DO_IRQ
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select IRQ_PER_CPU
+
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
@@ -66,22 +69,9 @@ config TIME_LOW_RES
depends on SMP
default y
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_IRQ_PROBE
- def_bool y
-
config HAVE_LATENCYTOP_SUPPORT
def_bool y
-config IRQ_PER_CPU
- bool
- default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index f9305f30603a..b647b182dacc 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -8,7 +8,7 @@ CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 628d3e022535..311ca367b622 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 959f38ccb9a7..7d69e9bf5e64 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -36,24 +36,12 @@ config GENERIC_TIME_VSYSCALL
config GENERIC_CLOCKEVENTS
def_bool y
-config GENERIC_HARDIRQS
- bool
- default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- bool
- default y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool PPC64
-config IRQ_PER_CPU
- bool
- default y
-
config NR_IRQS
int "Number of virtual interrupt numbers"
range 32 32768
@@ -143,6 +131,9 @@ config PPC
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_SPARSE_IRQ
+ select IRQ_PER_CPU
config EARLY_PRINTK
bool
@@ -392,19 +383,6 @@ config IRQ_ALL_CPUS
CPU. Generally saying Y is safe, although some problems have been
reported with SMP Power Macintoshes with this option enabled.
-config SPARSE_IRQ
- bool "Support sparse irq numbering"
- default n
- help
- This enables support for sparse irqs. This is useful for distro
- kernels that want to define a high CONFIG_NR_CPUS value but still
- want to have low kernel memory footprint on smaller machines.
-
- ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
- out the irq_desc[] array in a more NUMA-friendly way. )
-
- If you don't know what to do here, say N.
-
config NUMA
bool "NUMA support"
depends on PPC64
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 96deec63bcf3..89178164af5e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -368,7 +368,7 @@ INSTALL := install
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
-dts-installed := $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
+dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
all-installed := $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index d3db02f98ddd..a0bd1881081e 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -109,7 +109,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
- compatible = "fsl,mpc8315-immr", "simple-bus";
+ compatible = "fsl,mpc8308-immr", "simple-bus";
ranges = <0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 2bbecbb4cbf9..69422eb24d97 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -291,13 +291,13 @@
ranges = <0x0 0xc100 0x200>;
cell-index = <1>;
dma00: dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
+ compatible = "fsl,ssi-dma-channel";
reg = <0x0 0x80>;
cell-index = <0>;
interrupts = <76 2>;
};
dma01: dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
+ compatible = "fsl,ssi-dma-channel";
reg = <0x80 0x80>;
cell-index = <1>;
interrupts = <77 2>;
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 97fedceaa30b..4182c772340b 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 33b3c24f4edd..2dbb293163f5 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
index 4613079a0ab1..ebeb4accad65 100644
--- a/arch/powerpc/configs/40x/hcu4_defconfig
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 34b8c1a1e752..532ea9d93a15 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 651be09136fa..3c142ac1b344 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index ded455e18339..ff57d4828ffc 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 63746a041d6b..3ed16d5c909d 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index f5f2a4e3e21b..b1b7d2c5c059 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index ac65b48b8ccd..30a0a8e08fdd 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_PCI_QUIRKS is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 17e4dd98eed7..a46942aac695 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index fedd03fdf5d5..07d77e51f1ba 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index ebff7011282e..2ce7e9aff09e 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 865e93fb41fd..18730ff9de7c 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -6,7 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 8ece4c774415..92f863ac8443 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_PROFILING=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 4ca9b4873c51..34c09144a699 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index e3b65d24207e..21c33faf61a2 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 64cd0f3421a9..01cc2b1a7f9a 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 01d03367917e..dfcffede16ad 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 89b2f9626137..47e399f2892f 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index e3386cf6f5b7..a6a002ed5681 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 9c13b9dffafa..6cf9d6614805 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index f234c4d0b15c..69b57daf402e 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index a4a795c80740..f3638ae0a627 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 20d53a1aa7e4..6828eda02bdc 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 6bd58338bf1a..7f7e4a878602 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 3a1f70292d9d..959cd2cfc275 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_KALLSYMS is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index eed42d8919e8..d2762d9dcb8e 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index e43ecb27dfd7..7a7b731c5735 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index c2e6ab51d335..c683bce4c26e 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 1d3b20065913..a721cd3d793f 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index 91fe73bd5ad2..a5699a1f7d0a 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 6d300f205604..b4da1a7e6449 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index b236a67e01fe..291f8221d5a6 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 001dead3cde9..f8b228aaa03a 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index 9dccefca00c3..99660c062191 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index d4b165d7d294..10b5c4cd0e72 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 89ba67274bda..45925d701d2a 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index 2ea6b405046a..f367985be6f7 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index bffe3c775030..414eda381591 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index fa5c9eefc9ad..6d6463fe06fc 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 385b1af37d75..8f7c1061891a 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KSI8560=y
CONFIG_CPM2=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 222b704c1f4b..55e0725500dc 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MPC8540_ADS=y
CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index 619702de9477..d724095530a6 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MPC8560_ADS=y
CONFIG_BINFMT_MISC=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 6bf56e83f957..4b44beaa21ae 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MPC85xx_CDS=y
CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index a9a17d055766..5b2b651dfb98 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_SBC8548=y
diff --git a/arch/powerpc/configs/85xx/sbc8560_defconfig b/arch/powerpc/configs/85xx/sbc8560_defconfig
index 820e32d8c42b..f7fdb0318e4c 100644
--- a/arch/powerpc/configs/85xx/sbc8560_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8560_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_SBC8560=y
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index b6db3f47af99..77506b5d5a41 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 333a41bd2a68..5d4db154bf59 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 33db352f847e..ddcb9f37fa1f 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index f0c20dfbd4d3..981abd6d4b57 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index a883450dcdfa..37b3d7227cdd 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index ff95f90dc171..3593b320c97c 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 8d6c90ea4783..de413acc34d6 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index f53efe4a0e0c..5ea3124518fd 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -11,7 +11,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 432ebc28d25c..4b2441244eab 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index ce5e919d9b55..a360ba44b928 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 589e71e6dc1c..be2829dd129f 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 321fb47096d9..036bfb2d18cd 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_ELF_CORE is not set
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index b5e46399374e..0c9c7ed7ec75 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig
index 71145c3a64db..0a92ca045641 100644
--- a/arch/powerpc/configs/86xx/sbc8641d_defconfig
+++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index ca84c7fc24d5..69128740c14d 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 94d120ef99cf..06f95492afc7 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -12,7 +12,7 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 2677b08199e7..fceffb3cffbe 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -2,7 +2,7 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index f9a3112e5442..219fd470ed22 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index fcf0a398cd66..e74d3a483705 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
CONFIG_PERF_COUNTERS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index b9b63a609525..94ebfee188db 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_PPC_CHRP is not set
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index c4ed255af18b..39518e91822f 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -3,7 +3,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/mgsuvd_defconfig b/arch/powerpc/configs/mgsuvd_defconfig
index f276c7cf555b..2a490626015c 100644
--- a/arch/powerpc/configs/mgsuvd_defconfig
+++ b/arch/powerpc/configs/mgsuvd_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
# CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index 3b9470883de5..75f0bbf0f6e8 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index c7d68ff1a736..6a22400f73c1 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -2,7 +2,7 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5b1b10fd9740..5aac9a8bc53b 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 3aeb5949cfef..99a19d1e9bf8 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index d62c8016f4bc..c636f23f8c92 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -12,7 +12,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 668215cae890..5c258823e694 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_HOTPLUG is not set
# CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 63b90d477889..55b54318fef6 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index f9b83481b00e..9e146cdf63de 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 93d7425ce6cd..bfd634b5ada7 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 2fa05f7be4cb..47133202a625 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index a4353bef31c5..baad8db21b61 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -3,7 +3,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 49cffe003657..caba919f65d8 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -8,7 +8,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_PERF_EVENTS is not set
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index f87f0e15cfa7..9c3f22c6cde1 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -2,7 +2,7 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_SMP=y
-CONFIG_NR_CPUS=128
+CONFIG_NR_CPUS=1024
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -45,6 +45,8 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_PPC_64K_PAGES=y
+CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=m
CONFIG_HOTPLUG_PCI_RPA=m
@@ -184,6 +186,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_TIGON3=y
+CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
CONFIG_EHEA=y
@@ -311,9 +314,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
-CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 4f0c10a62b9d..ebb2a66c99d3 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -1,7 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index d0a5b6763880..8616fde0896f 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index bb8ba75b7c68..175295fbf4f3 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -7,7 +7,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
CONFIG_PERF_COUNTERS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 96a7d067fbb2..921a8470e18a 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -37,18 +37,21 @@ label##2: \
.align 2; \
label##3:
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
-label##4: \
- .popsection; \
- .pushsection sect,"a"; \
- .align 3; \
-label##5: \
- FTR_ENTRY_LONG msk; \
- FTR_ENTRY_LONG val; \
- FTR_ENTRY_OFFSET label##1b-label##5b; \
- FTR_ENTRY_OFFSET label##2b-label##5b; \
- FTR_ENTRY_OFFSET label##3b-label##5b; \
- FTR_ENTRY_OFFSET label##4b-label##5b; \
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
+label##4: \
+ .popsection; \
+ .pushsection sect,"a"; \
+ .align 3; \
+label##5: \
+ FTR_ENTRY_LONG msk; \
+ FTR_ENTRY_LONG val; \
+ FTR_ENTRY_OFFSET label##1b-label##5b; \
+ FTR_ENTRY_OFFSET label##2b-label##5b; \
+ FTR_ENTRY_OFFSET label##3b-label##5b; \
+ FTR_ENTRY_OFFSET label##4b-label##5b; \
+ .ifgt (label##4b-label##3b)-(label##2b-label##1b); \
+ .error "Feature section else case larger than body"; \
+ .endif; \
.popsection;
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 4e10f508570a..0edb6842b13d 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -467,13 +467,22 @@ struct qe_immap {
extern struct qe_immap __iomem *qe_immr;
extern phys_addr_t get_qe_base(void);
-static inline unsigned long immrbar_virt_to_phys(void *address)
+/*
+ * Returns the offset within the QE address space of the given pointer.
+ *
+ * Note that the QE does not support 36-bit physical addresses, so if
+ * get_qe_base() returns a number above 4GB, the caller will probably fail.
+ */
+static inline phys_addr_t immrbar_virt_to_phys(void *address)
{
- if ( ((u32)address >= (u32)qe_immr) &&
- ((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
- return (unsigned long)(address - (u32)qe_immr +
- (u32)get_qe_base());
- return (unsigned long)virt_to_phys(address);
+ void *q = (void *)qe_immr;
+
+ /* Is it a MURAM address? */
+ if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
+ return get_qe_base() + (address - q);
+
+ /* It's an address returned by kmalloc */
+ return virt_to_phys(address);
}
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b85d8ddbb666..b0b06d85788d 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -12,24 +12,44 @@
#else
#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ mflr r0; \
+ stdu r1, -32(r1); \
+ std r0, 16(r1); \
+ stdu r1, -32(r1); \
+ bl func; \
+ ld r1, 0(r1); \
+ ld r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ bl func;
+#endif
+
/*
* Most of the CPU's IRQ-state tracing is done from assembly code; we
* have to call a C function so call a wrapper that saves all the
* C-clobbered registers.
*/
-#define TRACE_ENABLE_INTS bl .trace_hardirqs_on
-#define TRACE_DISABLE_INTS bl .trace_hardirqs_off
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
- cmpdi en,0; \
- bne 95f; \
- stb en,PACASOFTIRQEN(r13); \
- bl .trace_hardirqs_off; \
- b skip; \
-95: bl .trace_hardirqs_on; \
+#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
+#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
+ cmpdi en,0; \
+ bne 95f; \
+ stb en,PACASOFTIRQEN(r13); \
+ TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \
+ b skip; \
+95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \
li en,1;
#define TRACE_AND_RESTORE_IRQ(en) \
TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
- stb en,PACASOFTIRQEN(r13); \
+ stb en,PACASOFTIRQEN(r13); \
96:
#else
#define TRACE_ENABLE_INTS
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8433d36619a1..991d5998d6be 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -116,9 +116,6 @@ struct machdep_calls {
* If for some reason there is no irq, but the interrupt
* shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
unsigned int (*get_irq)(void);
-#ifdef CONFIG_KEXEC
- void (*kexec_cpu_down)(int crash_shutdown, int secondary);
-#endif
/* PCI stuff */
/* Called after scanning the bus, before allocating resources */
@@ -235,11 +232,7 @@ struct machdep_calls {
void (*machine_shutdown)(void);
#ifdef CONFIG_KEXEC
- /* Called to do the minimal shutdown needed to run a kexec'd kernel
- * to run successfully.
- * XXX Should we move this one out of kexec scope?
- */
- void (*machine_crash_shutdown)(struct pt_regs *regs);
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
/* Called to do what every setup is needed on image and the
* reboot code buffer. Returns 0 on success.
@@ -247,15 +240,6 @@ struct machdep_calls {
* claims to support kexec.
*/
int (*machine_kexec_prepare)(struct kimage *image);
-
- /* Called to handle any machine specific cleanup on image */
- void (*machine_kexec_cleanup)(struct kimage *image);
-
- /* Called to perform the _real_ kexec.
- * Do NOT allocate memory or fail here. We are past the point of
- * no return.
- */
- void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ff0005eec7dd..125fc1ad665d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -283,6 +283,7 @@
#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
+#ifdef CONFIG_6xx
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
@@ -292,6 +293,7 @@
#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
+#endif
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 667a498eaee1..e68c69bf741a 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -246,6 +246,20 @@
store or cache line push */
#endif
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
+#define HID1_RFXE 0x00020000 /* Read fault exception enable */
+#define HID1_R1DPE 0x00008000 /* R1 data bus parity enable */
+#define HID1_R2DPE 0x00004000 /* R2 data bus parity enable */
+#define HID1_ASTME 0x00002000 /* Address bus streaming mode enable */
+#define HID1_ABE 0x00001000 /* Address broadcast enable */
+#define HID1_MPXTT 0x00000400 /* MPX re-map transfer type */
+#define HID1_ATS 0x00000080 /* Atomic status */
+#define HID1_MID_MASK 0x0000000f /* MID input pins */
+#endif
+
/* Bit definitions for the DBSR. */
/*
* DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 0ab8d869e3d6..0c8b35d75232 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -203,14 +203,6 @@ void spu_irq_setaffinity(struct spu *spu, int cpu);
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
void *code, int code_size);
-#ifdef CONFIG_KEXEC
-void crash_register_spus(struct list_head *list);
-#else
-static inline void crash_register_spus(struct list_head *list)
-{
-}
-#endif
-
extern void spu_invalidate_slbs(struct spu *spu);
extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
int spu_64k_pages_available(void);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 894e64fa481e..5c518ad3445c 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -64,6 +64,12 @@ _GLOBAL(__setup_cpu_e500v2)
bl __e500_icache_setup
bl __e500_dcache_setup
bl __setup_e500_ivors
+#ifdef CONFIG_RAPIDIO
+ /* Ensure that RFXE is set */
+ mfspr r3,SPRN_HID1
+ oris r3,r3,HID1_RFXE@h
+ mtspr SPRN_HID1,r3
+#endif
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index be5ab18b03b5..8d74a24c5502 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -116,7 +116,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Power3+ */
@@ -132,7 +131,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Northstar */
@@ -148,7 +146,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Pulsar */
@@ -164,7 +161,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* I-star */
@@ -180,7 +176,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* S-star */
@@ -196,7 +191,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
- .machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Power4 */
@@ -212,7 +206,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "power4",
},
{ /* Power4+ */
@@ -228,7 +221,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "power4",
},
{ /* PPC970 */
@@ -247,7 +239,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -266,7 +257,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -285,7 +275,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -304,7 +293,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -322,7 +310,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
- .machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -343,7 +330,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
*/
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
- .machine_check = machine_check_generic,
.platform = "power5",
},
{ /* Power5++ */
@@ -360,7 +346,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
- .machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* Power5 GS */
@@ -378,7 +363,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
- .machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -390,7 +374,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
- .machine_check = machine_check_generic,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power5+",
@@ -413,7 +396,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
.oprofile_mmcra_clear = POWER6_MMCRA_THRM |
POWER6_MMCRA_OTHER,
- .machine_check = machine_check_generic,
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -425,7 +407,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
- .machine_check = machine_check_generic,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power6",
@@ -440,7 +421,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
- .machine_check = machine_check_generic,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power7",
@@ -492,7 +472,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/cell-be",
.oprofile_type = PPC_OPROFILE_CELL,
- .machine_check = machine_check_generic,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -510,7 +489,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_pa6t,
.oprofile_cpu_type = "ppc64/pa6t",
.oprofile_type = PPC_OPROFILE_PA6T,
- .machine_check = machine_check_generic,
.platform = "pa6t",
},
{ /* default match */
@@ -524,7 +502,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .machine_check = machine_check_generic,
.platform = "power4",
}
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 832c8c4db254..3d569e2aff18 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@ int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;
-#define CRASH_HANDLER_MAX 2
+#define CRASH_HANDLER_MAX 3
/* NULL terminated list of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
static DEFINE_SPINLOCK(crash_handlers_lock);
@@ -125,7 +125,7 @@ static void crash_kexec_prepare_cpus(int cpu)
smp_wmb();
/*
- * FIXME: Until we will have the way to stop other CPUSs reliabally,
+ * FIXME: Until we will have the way to stop other CPUs reliably,
* the crash CPU will send an IPI and wait for other CPUs to
* respond.
* Delay of at least 10 seconds.
@@ -254,72 +254,6 @@ void crash_kexec_secondary(struct pt_regs *regs)
cpus_in_sr = CPU_MASK_NONE;
}
#endif
-#ifdef CONFIG_SPU_BASE
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-struct crash_spu_info {
- struct spu *spu;
- u32 saved_spu_runcntl_RW;
- u32 saved_spu_status_R;
- u32 saved_spu_npc_RW;
- u64 saved_mfc_sr1_RW;
- u64 saved_mfc_dar;
- u64 saved_mfc_dsisr;
-};
-
-#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
-static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
-
-static void crash_kexec_stop_spus(void)
-{
- struct spu *spu;
- int i;
- u64 tmp;
-
- for (i = 0; i < CRASH_NUM_SPUS; i++) {
- if (!crash_spu_info[i].spu)
- continue;
-
- spu = crash_spu_info[i].spu;
-
- crash_spu_info[i].saved_spu_runcntl_RW =
- in_be32(&spu->problem->spu_runcntl_RW);
- crash_spu_info[i].saved_spu_status_R =
- in_be32(&spu->problem->spu_status_R);
- crash_spu_info[i].saved_spu_npc_RW =
- in_be32(&spu->problem->spu_npc_RW);
-
- crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
- crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
- tmp = spu_mfc_sr1_get(spu);
- crash_spu_info[i].saved_mfc_sr1_RW = tmp;
-
- tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
- spu_mfc_sr1_set(spu, tmp);
-
- __delay(200);
- }
-}
-
-void crash_register_spus(struct list_head *list)
-{
- struct spu *spu;
-
- list_for_each_entry(spu, list, full_list) {
- if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
- continue;
-
- crash_spu_info[spu->number].spu = spu;
- }
-}
-
-#else
-static inline void crash_kexec_stop_spus(void)
-{
-}
-#endif /* CONFIG_SPU_BASE */
/*
* Register a function to be called on shutdown. Only use this if you
@@ -439,8 +373,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
- crash_kexec_stop_spus();
-
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index c22dc1ec1c94..56212bc0ab08 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -880,7 +880,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
*/
andi. r10,r9,MSR_EE
beq 1f
+ /*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+ stwu r1,-32(r1)
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,-32(r1)
bl trace_hardirqs_on
+ lwz r1,0(r1)
+ lwz r1,0(r1)
lwz r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index df7e20c191cd..49a170af8145 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
+#include <linux/ftrace.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -44,10 +45,7 @@ void machine_kexec_mask_interrupts(void) {
void machine_crash_shutdown(struct pt_regs *regs)
{
- if (ppc_md.machine_crash_shutdown)
- ppc_md.machine_crash_shutdown(regs);
- else
- default_machine_crash_shutdown(regs);
+ default_machine_crash_shutdown(regs);
}
/*
@@ -65,8 +63,6 @@ int machine_kexec_prepare(struct kimage *image)
void machine_kexec_cleanup(struct kimage *image)
{
- if (ppc_md.machine_kexec_cleanup)
- ppc_md.machine_kexec_cleanup(image);
}
void arch_crash_save_vmcoreinfo(void)
@@ -87,10 +83,13 @@ void arch_crash_save_vmcoreinfo(void)
*/
void machine_kexec(struct kimage *image)
{
- if (ppc_md.machine_kexec)
- ppc_md.machine_kexec(image);
- else
- default_machine_kexec(image);
+ int save_ftrace_enabled;
+
+ save_ftrace_enabled = __ftrace_enabled_save();
+
+ default_machine_kexec(image);
+
+ __ftrace_enabled_restore(save_ftrace_enabled);
/* Fall back to normal restart if we're still alive. */
machine_restart(NULL);
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 4dcf5f831e9d..b0dc8f7069cd 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -596,6 +596,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (left <= 0)
left = period;
record = 1;
+ event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 84906d3fc860..7a1d5cb76932 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -631,7 +631,7 @@ void show_regs(struct pt_regs * regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
- printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+ printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
printk("TASK = %p[%d] '%s' THREAD: %p",
current, task_pid_nr(current), current->comm, task_thread_info(current));
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2b442e6c21e6..bf5f5ce3a7bd 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -256,31 +256,16 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_update_flash_t *uf;
char msg[RTAS_MSG_MAXLEN];
- int msglen;
- uf = (struct rtas_update_flash_t *) dp->data;
+ uf = dp->data;
if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
get_flash_status_msg(uf->status, msg);
} else { /* FIRMWARE_UPDATE_NAME */
sprintf(msg, "%d\n", uf->status);
}
- msglen = strlen(msg);
- if (msglen > count)
- msglen = count;
-
- if (ppos && *ppos != 0)
- return 0; /* be cheap */
-
- if (!access_ok(VERIFY_WRITE, buf, msglen))
- return -EINVAL;
- if (copy_to_user(buf, msg, msglen))
- return -EFAULT;
-
- if (ppos)
- *ppos = msglen;
- return msglen;
+ return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* constructor for flash_block_cache */
@@ -394,26 +379,13 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf,
char msg[RTAS_MSG_MAXLEN];
int msglen;
- args_buf = (struct rtas_manage_flash_t *) dp->data;
+ args_buf = dp->data;
if (args_buf == NULL)
return 0;
msglen = sprintf(msg, "%d\n", args_buf->status);
- if (msglen > count)
- msglen = count;
- if (ppos && *ppos != 0)
- return 0; /* be cheap */
-
- if (!access_ok(VERIFY_WRITE, buf, msglen))
- return -EINVAL;
-
- if (copy_to_user(buf, msg, msglen))
- return -EFAULT;
-
- if (ppos)
- *ppos = msglen;
- return msglen;
+ return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
@@ -495,24 +467,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
char msg[RTAS_MSG_MAXLEN];
int msglen;
- args_buf = (struct rtas_validate_flash_t *) dp->data;
+ args_buf = dp->data;
- if (ppos && *ppos != 0)
- return 0; /* be cheap */
-
msglen = get_validate_flash_msg(args_buf, msg);
- if (msglen > count)
- msglen = count;
-
- if (!access_ok(VERIFY_WRITE, buf, msglen))
- return -EINVAL;
-
- if (copy_to_user(buf, msg, msglen))
- return -EFAULT;
- if (ppos)
- *ppos = msglen;
- return msglen;
+ return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0438f819fe6b..049dbecb5dbc 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -160,7 +160,7 @@ static int log_rtas_len(char * buf)
/* rtas fixed header */
len = 8;
err = (struct rtas_error_log *)buf;
- if (err->extended_log_length) {
+ if (err->extended && err->extended_log_length) {
/* extended header */
len += err->extended_log_length;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09e4dea4a85a..09d31dbf43f9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -265,11 +265,26 @@ void accumulate_stolen_time(void)
{
u64 sst, ust;
- sst = scan_dispatch_log(get_paca()->starttime_user);
- ust = scan_dispatch_log(get_paca()->starttime);
- get_paca()->system_time -= sst;
- get_paca()->user_time -= ust;
- get_paca()->stolen_time += ust + sst;
+ u8 save_soft_enabled = local_paca->soft_enabled;
+ u8 save_hard_enabled = local_paca->hard_enabled;
+
+ /* We are called early in the exception entry, before
+ * soft/hard_enabled are sync'ed to the expected state
+ * for the exception. We are hard disabled but the PACA
+ * needs to reflect that so various debug stuff doesn't
+ * complain
+ */
+ local_paca->soft_enabled = 0;
+ local_paca->hard_enabled = 0;
+
+ sst = scan_dispatch_log(local_paca->starttime_user);
+ ust = scan_dispatch_log(local_paca->starttime);
+ local_paca->system_time -= sst;
+ local_paca->user_time -= ust;
+ local_paca->stolen_time += ust + sst;
+
+ local_paca->soft_enabled = save_soft_enabled;
+ local_paca->hard_enabled = save_hard_enabled;
}
static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1b2cdc8eec90..bd74fac169be 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -626,12 +626,6 @@ void machine_check_exception(struct pt_regs *regs)
if (recover > 0)
return;
- if (user_mode(regs)) {
- regs->msr |= MSR_RI;
- _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
- return;
- }
-
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort
*
@@ -643,16 +637,12 @@ void machine_check_exception(struct pt_regs *regs)
return;
#endif
- if (debugger_fault_handler(regs)) {
- regs->msr |= MSR_RI;
+ if (debugger_fault_handler(regs))
return;
- }
if (check_io_access(regs))
return;
- if (debugger_fault_handler(regs))
- return;
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index cb737484c5aa..f4613118132e 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -172,6 +172,25 @@ globl(ftr_fixup_test6_expected)
3: or 3,3,3
+#if 0
+/* Test that if we have a larger else case the assembler spots it and
+ * reports an error. #if 0'ed so as not to break the build normally.
+ */
+ftr_fixup_test7:
+ or 1,1,1
+BEGIN_FTR_SECTION
+ or 2,2,2
+ or 2,2,2
+ or 2,2,2
+FTR_SECTION_ELSE
+ or 3,3,3
+ or 3,3,3
+ or 3,3,3
+ or 3,3,3
+ALT_FTR_SECTION_END(0, 1)
+ or 1,1,1
+#endif
+
#define MAKE_MACRO_TEST(TYPE) \
globl(ftr_fixup_test_ ##TYPE##_macros) \
or 1,1,1; \
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 661d354e4ff2..d0c4e15b7794 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -57,12 +57,12 @@ static void __init mpc830x_rdb_init_IRQ(void)
ipic_set_default_priority();
}
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
"MPC8308RDB",
"fsl,mpc8308rdb",
"denx,mpc8308_p1m",
NULL
-}
+};
/*
* Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index b54cd736a895..f859ead49a8d 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -60,11 +60,11 @@ static void __init mpc831x_rdb_init_IRQ(void)
ipic_set_default_priority();
}
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
"MPC8313ERDB",
"fsl,mpc8315erdb",
NULL
-}
+};
/*
* Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 0fea8811d45b..82a434510d83 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -35,6 +35,8 @@
/* system i/o configuration register high */
#define MPC83XX_SICRH_OFFS 0x118
+#define MPC8308_SICRH_USB_MASK 0x000c0000
+#define MPC8308_SICRH_USB_ULPI 0x00040000
#define MPC834X_SICRH_USB_UTMI 0x00020000
#define MPC831X_SICRH_USB_MASK 0x000000e0
#define MPC831X_SICRH_USB_ULPI 0x000000a0
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 3ba4bb7d41bb..2c64164722d0 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -127,7 +127,8 @@ int mpc831x_usb_cfg(void)
/* Configure clock */
immr_node = of_get_parent(np);
- if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
+ if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") ||
+ of_device_is_compatible(immr_node, "fsl,mpc8308-immr")))
clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
MPC8315_SCCR_USB_MASK,
MPC8315_SCCR_USB_DRCM_01);
@@ -138,7 +139,11 @@ int mpc831x_usb_cfg(void)
/* Configure pin mux for ULPI. There is no pin mux for UTMI */
if (prop && !strcmp(prop, "ulpi")) {
- if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
+ if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+ clrsetbits_be32(immap + MPC83XX_SICRH_OFFS,
+ MPC8308_SICRH_USB_MASK,
+ MPC8308_SICRH_USB_ULPI);
+ } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
clrsetbits_be32(immap + MPC83XX_SICRL_OFFS,
MPC8315_SICRL_USB_MASK,
MPC8315_SICRL_USB_ULPI);
@@ -173,6 +178,9 @@ int mpc831x_usb_cfg(void)
!strcmp(prop, "utmi"))) {
u32 refsel;
+ if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))
+ goto out;
+
if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
refsel = CONTROL_REFSEL_24MHZ;
else
@@ -186,9 +194,11 @@ int mpc831x_usb_cfg(void)
temp = CONTROL_PHY_CLK_SEL_ULPI;
#ifdef CONFIG_USB_OTG
/* Set OTG_PORT */
- dr_mode = of_get_property(np, "dr_mode", NULL);
- if (dr_mode && !strcmp(dr_mode, "otg"))
- temp |= CONTROL_OTG_PORT;
+ if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+ dr_mode = of_get_property(np, "dr_mode", NULL);
+ if (dr_mode && !strcmp(dr_mode, "otg"))
+ temp |= CONTROL_OTG_PORT;
+ }
#endif /* CONFIG_USB_OTG */
out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp);
} else {
@@ -196,6 +206,7 @@ int mpc831x_usb_cfg(void)
ret = -EINVAL;
}
+out:
iounmap(usb_regs);
of_node_put(np);
return ret;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 968c1c0b4d5b..d809836bcf5f 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -39,8 +39,6 @@ struct spu_gov_info_struct {
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
-static struct workqueue_struct *kspugov_wq;
-
static int calc_freq(struct spu_gov_info_struct *info)
{
int cpu;
@@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work)
__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
delay = usecs_to_jiffies(info->poll_int);
- queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+ schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
int delay = usecs_to_jiffies(info->poll_int);
INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
- queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+ schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@@ -152,27 +150,15 @@ static int __init spu_gov_init(void)
{
int ret;
- kspugov_wq = create_workqueue("kspugov");
- if (!kspugov_wq) {
- printk(KERN_ERR "creation of kspugov failed\n");
- ret = -EFAULT;
- goto out;
- }
-
ret = cpufreq_register_governor(&spu_governor);
- if (ret) {
+ if (ret)
printk(KERN_ERR "registration of governor failed\n");
- destroy_workqueue(kspugov_wq);
- goto out;
- }
-out:
return ret;
}
static void __exit spu_gov_exit(void)
{
cpufreq_unregister_governor(&spu_governor);
- destroy_workqueue(kspugov_wq);
}
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 1b5749042756..d31c594cfdf3 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -145,9 +145,4 @@ define_machine(qpace) {
.calibrate_decr = generic_calibrate_decr,
.progress = qpace_progress,
.init_IRQ = iic_init_IRQ,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8547e86bfb42..acfaccea5f4f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -37,6 +37,7 @@
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
+#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);
@@ -727,6 +728,75 @@ static ssize_t spu_stat_show(struct sys_device *sysdev,
static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
+#ifdef CONFIG_KEXEC
+
+struct crash_spu_info {
+ struct spu *spu;
+ u32 saved_spu_runcntl_RW;
+ u32 saved_spu_status_R;
+ u32 saved_spu_npc_RW;
+ u64 saved_mfc_sr1_RW;
+ u64 saved_mfc_dar;
+ u64 saved_mfc_dsisr;
+};
+
+#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
+static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
+
+static void crash_kexec_stop_spus(void)
+{
+ struct spu *spu;
+ int i;
+ u64 tmp;
+
+ for (i = 0; i < CRASH_NUM_SPUS; i++) {
+ if (!crash_spu_info[i].spu)
+ continue;
+
+ spu = crash_spu_info[i].spu;
+
+ crash_spu_info[i].saved_spu_runcntl_RW =
+ in_be32(&spu->problem->spu_runcntl_RW);
+ crash_spu_info[i].saved_spu_status_R =
+ in_be32(&spu->problem->spu_status_R);
+ crash_spu_info[i].saved_spu_npc_RW =
+ in_be32(&spu->problem->spu_npc_RW);
+
+ crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
+ crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
+ tmp = spu_mfc_sr1_get(spu);
+ crash_spu_info[i].saved_mfc_sr1_RW = tmp;
+
+ tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ spu_mfc_sr1_set(spu, tmp);
+
+ __delay(200);
+ }
+}
+
+static void crash_register_spus(struct list_head *list)
+{
+ struct spu *spu;
+ int ret;
+
+ list_for_each_entry(spu, list, full_list) {
+ if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
+ continue;
+
+ crash_spu_info[spu->number].spu = spu;
+ }
+
+ ret = crash_shutdown_register(&crash_kexec_stop_spus);
+ if (ret)
+ printk(KERN_ERR "Could not register SPU crash handler");
+}
+
+#else
+static inline void crash_register_spus(struct list_head *list)
+{
+}
+#endif
+
static int __init init_spu_base(void)
{
int i, ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 02f7b113a31b..3c7c3f82d842 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -219,24 +219,17 @@ spufs_mem_write(struct file *file, const char __user *buffer,
loff_t pos = *ppos;
int ret;
- if (pos < 0)
- return -EINVAL;
if (pos > LS_SIZE)
return -EFBIG;
- if (size > LS_SIZE - pos)
- size = LS_SIZE - pos;
ret = spu_acquire(ctx);
if (ret)
return ret;
local_store = ctx->ops->get_ls(ctx);
- ret = copy_from_user(local_store + pos, buffer, size);
+ size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
spu_release(ctx);
- if (ret)
- return -EFAULT;
- *ppos = pos + size;
return size;
}
@@ -574,18 +567,15 @@ spufs_regs_write(struct file *file, const char __user *buffer,
if (*pos >= sizeof(lscsa->gprs))
return -EFBIG;
- size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
- *pos += size;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = copy_from_user((char *)lscsa->gprs + *pos - size,
- buffer, size) ? -EFAULT : size;
+ size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
+ buffer, size);
spu_release_saved(ctx);
- return ret;
+ return size;
}
static const struct file_operations spufs_regs_fops = {
@@ -630,18 +620,15 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
if (*pos >= sizeof(lscsa->fpcr))
return -EFBIG;
- size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- *pos += size;
- ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
- buffer, size) ? -EFAULT : size;
+ size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
+ buffer, size);
spu_release_saved(ctx);
- return ret;
+ return size;
}
static const struct file_operations spufs_fpcr_fops = {
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index 1106fd99627f..a138e14bad2e 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -75,14 +75,6 @@ static void gamecube_shutdown(void)
flipper_quiesce();
}
-#ifdef CONFIG_KEXEC
-static int gamecube_kexec_prepare(struct kimage *image)
-{
- return 0;
-}
-#endif /* CONFIG_KEXEC */
-
-
define_machine(gamecube) {
.name = "gamecube",
.probe = gamecube_probe,
@@ -95,9 +87,6 @@ define_machine(gamecube) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
.machine_shutdown = gamecube_shutdown,
-#ifdef CONFIG_KEXEC
- .machine_kexec_prepare = gamecube_kexec_prepare,
-#endif
};
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 649473a729b8..1b5dc1a2e145 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
-#include <linux/kexec.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>
@@ -226,13 +225,6 @@ static void wii_shutdown(void)
flipper_quiesce();
}
-#ifdef CONFIG_KEXEC
-static int wii_machine_kexec_prepare(struct kimage *image)
-{
- return 0;
-}
-#endif /* CONFIG_KEXEC */
-
define_machine(wii) {
.name = "wii",
.probe = wii_probe,
@@ -246,9 +238,6 @@ define_machine(wii) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
.machine_shutdown = wii_shutdown,
-#ifdef CONFIG_KEXEC
- .machine_kexec_prepare = wii_machine_kexec_prepare,
-#endif
};
static struct of_device_id wii_of_bus[] = {
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 47a20cfb4486..e5bc9f75d474 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -2,7 +2,7 @@ config PPC_ISERIES
bool "IBM Legacy iSeries"
depends on PPC64 && PPC_BOOK3S
select PPC_INDIRECT_IO
- select PPC_PCI_CHOICE if EMBEDDED
+ select PPC_PCI_CHOICE if EXPERT
menu "iSeries device drivers"
depends on PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5d1b743dbe7e..5b3da4b4ea79 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -10,7 +10,7 @@ config PPC_PSERIES
select RTAS_ERROR_LOGGING
select PPC_UDBG_16550
select PPC_NATIVE
- select PPC_PCI_CHOICE if EMBEDDED
+ select PPC_PCI_CHOICE if EXPERT
default y
config PPC_SPLPAR
@@ -24,9 +24,9 @@ config PPC_SPLPAR
two or more partitions.
config EEH
- bool "PCI Extended Error Handling (EEH)" if EMBEDDED
+ bool "PCI Extended Error Handling (EEH)" if EXPERT
depends on PPC_PSERIES && PCI
- default y if !EMBEDDED
+ default y if !EXPERT
config PSERIES_MSI
bool
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 53cbd53d8740..77d38a5e2ff9 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,13 +61,3 @@ void __init setup_kexec_cpu_down_xics(void)
{
ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
}
-
-static int __init pseries_kexec_setup(void)
-{
- ppc_md.machine_kexec = default_machine_kexec;
- ppc_md.machine_kexec_prepare = default_machine_kexec_prepare;
- ppc_md.machine_crash_shutdown = default_machine_crash_shutdown;
-
- return 0;
-}
-machine_device_initcall(pseries, pseries_kexec_setup);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index a4fc6da87c2e..c55d7ad9c648 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -54,7 +54,8 @@
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
-static char mce_data_buf[RTAS_ERROR_LOG_MAX];
+static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
+static DEFINE_PER_CPU(__u64, mce_data_buf);
static int ras_get_sensor_state_token;
static int ras_check_exception_token;
@@ -196,12 +197,24 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Get the error information for errors coming through the
+/*
+ * Some versions of FWNMI place the buffer inside the 4kB page starting at
+ * 0x7000. Other versions place it inside the rtas buffer. We check both.
+ */
+#define VALID_FWNMI_BUFFER(A) \
+ ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
+ (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+
+/*
+ * Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
* will be returned if found.
*
- * The mce_data_buf does not have any locks or protection around it,
+ * If the RTAS error is not of the extended type, then we put it in a per
+ * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
+ *
+ * The global_mce_data_buf does not have any locks or protection around it,
* if a second machine check comes in, or a system reset is done
* before we have logged the error, then we will get corruption in the
* error log. This is preferable over holding off on calling
@@ -210,20 +223,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
*/
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
- unsigned long errdata = regs->gpr[3];
- struct rtas_error_log *errhdr = NULL;
unsigned long *savep;
+ struct rtas_error_log *h, *errhdr = NULL;
+
+ if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
+ printk(KERN_ERR "FWNMI: corrupt r3\n");
+ return NULL;
+ }
- if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
- (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
- savep = __va(errdata);
- regs->gpr[3] = savep[0]; /* restore original r3 */
- memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
- memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
- errhdr = (struct rtas_error_log *)mce_data_buf;
+ savep = __va(regs->gpr[3]);
+ regs->gpr[3] = savep[0]; /* restore original r3 */
+
+ /* If it isn't an extended log we can use the per cpu 64bit buffer */
+ h = (struct rtas_error_log *)&savep[1];
+ if (!h->extended) {
+ memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
+ errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
} else {
- printk("FWNMI: corrupt r3\n");
+ int len;
+
+ len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX);
+ memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
+ memcpy(global_mce_data_buf, h, len);
+ errhdr = (struct rtas_error_log *)global_mce_data_buf;
}
+
return errhdr;
}
@@ -235,7 +259,7 @@ static void fwnmi_release_errinfo(void)
{
int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
if (ret != 0)
- printk("FWNMI: nmi-interlock failed: %d\n", ret);
+ printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
int pSeries_system_reset_exception(struct pt_regs *regs)
@@ -259,31 +283,43 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
* Return 1 if corrected (or delivered a signal).
* Return 0 if there is nothing we can do.
*/
-static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
+static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
- int nonfatal = 0;
+ int recovered = 0;
- if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
+ if (!(regs->msr & MSR_RI)) {
+ /* If MSR_RI isn't set, we cannot recover */
+ recovered = 0;
+
+ } else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
/* Platform corrected itself */
- nonfatal = 1;
- } else if ((regs->msr & MSR_RI) &&
- user_mode(regs) &&
- err->severity == RTAS_SEVERITY_ERROR_SYNC &&
- err->disposition == RTAS_DISP_NOT_RECOVERED &&
- err->target == RTAS_TARGET_MEMORY &&
- err->type == RTAS_TYPE_ECC_UNCORR &&
- !(current->pid == 0 || is_global_init(current))) {
- /* Kill off a user process with an ECC error */
- printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
- current->pid);
- /* XXX something better for ECC error? */
- _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
- nonfatal = 1;
+ recovered = 1;
+
+ } else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
+ /* Platform corrected itself but could be degraded */
+ printk(KERN_ERR "MCE: limited recovery, system may "
+ "be degraded\n");
+ recovered = 1;
+
+ } else if (user_mode(regs) && !is_global_init(current) &&
+ err->severity == RTAS_SEVERITY_ERROR_SYNC) {
+
+ /*
+ * If we received a synchronous error when in userspace,
+ * kill the task. Firmware may report details of the failure
+ * asynchronously, so we can't rely on the target and type
+ * fields being valid here.
+ */
+ printk(KERN_ERR "MCE: uncorrectable error, killing task "
+ "%s:%d\n", current->comm, current->pid);
+
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
}
- log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
+ log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
- return nonfatal;
+ return recovered;
}
/*
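The rewritten recover_mce() above boils down to a short decision ladder. A standalone restatement of that ladder — plain C, where msr_ri_set, the DISP_* values and mce_recovered() are illustrative names rather than the kernel's RTAS constants — might read:

enum disposition {
	DISP_FULLY_RECOVERED,
	DISP_LIMITED_RECOVERY,
	DISP_NOT_RECOVERED
};

/* Returns 1 when the machine check is considered handled, 0 when it is fatal. */
static int mce_recovered(int msr_ri_set, enum disposition disp,
			 int sync_error_in_user_task)
{
	if (!msr_ri_set)                    /* interrupt state lost: cannot recover */
		return 0;
	if (disp == DISP_FULLY_RECOVERED)   /* firmware fixed it up completely */
		return 1;
	if (disp == DISP_LIMITED_RECOVERY)  /* fixed, but the system may be degraded */
		return 1;
	if (sync_error_in_user_task)        /* synchronous error in a user task: kill it */
		return 1;
	return 0;
}

Killing a user task that hit a synchronous error still counts as "recovered" here because the kernel itself can carry on; everything else falls through to the fatal path.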
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9f99bef2adec..8c6cab013278 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1555,8 +1555,6 @@ int fsl_rio_setup(struct platform_device *dev)
saved_mcheck_exception = ppc_md.machine_check_exception;
ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
#endif
- /* Ensure that RFXE is set */
- mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
return 0;
err:
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7c1342618a30..b0c8469e5ddd 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -674,7 +674,8 @@ void mpic_unmask_irq(unsigned int irq)
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
- printk(KERN_ERR "mpic_enable_irq timeout\n");
+ printk(KERN_ERR "%s: timeout on hwirq %u\n",
+ __func__, src);
break;
}
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
@@ -695,7 +696,8 @@ void mpic_mask_irq(unsigned int irq)
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
- printk(KERN_ERR "mpic_enable_irq timeout\n");
+ printk(KERN_ERR "%s: timeout on hwirq %u\n",
+ __func__, src);
break;
}
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 4293fdcb5398..27b2295f41f3 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -1,5 +1,9 @@
menu "Machine selection"
+config SCORE
+ def_bool y
+ select HAVE_GENERIC_HARDIRQS
+
choice
prompt "System type"
default MACH_SPCT6600
@@ -53,9 +57,6 @@ config GENERIC_CLOCKEVENTS
config SCHED_NO_NO_OMIT_FRAME_POINTER
def_bool y
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config GENERIC_SYSCALL_TABLE
def_bool y
@@ -68,9 +69,6 @@ menu "Kernel type"
config 32BIT
def_bool y
-config GENERIC_HARDIRQS
- def_bool y
-
config ARCH_FLATMEM_ENABLE
def_bool y
diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
index 9883c50e4636..df1edbf507a2 100644
--- a/arch/score/configs/spct6600_defconfig
+++ b/arch/score/configs/spct6600_defconfig
@@ -9,7 +9,7 @@ CONFIG_LOG_BUF_SHIFT=12
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index fff252209f63..ae555569823b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,6 +1,6 @@
config SUPERH
def_bool y
- select EMBEDDED
+ select EXPERT
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT
select HAVE_MEMBLOCK
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 45d9c87d083a..95695e97703e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -50,6 +50,7 @@ config SPARC64
select RTC_DRV_STARFIRE
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
+ select HAVE_GENERIC_HARDIRQS
config ARCH_DEFCONFIG
string
@@ -107,10 +108,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config NEED_PER_CPU_PAGE_FIRST_CHUNK
def_bool y if SPARC64
-config GENERIC_HARDIRQS_NO__DO_IRQ
- bool
- def_bool y if SPARC64
-
config MMU
bool
default y
@@ -276,10 +273,6 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
-config GENERIC_HARDIRQS
- bool
- default y if SPARC64
-
source "kernel/time/Kconfig"
if SPARC64
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e11b5fcb70eb..08948e4e1503 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -1,24 +1,33 @@
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/config-language.txt.
-config MMU
- def_bool y
-
-config GENERIC_CSUM
- def_bool y
-
-config GENERIC_HARDIRQS
+config TILE
def_bool y
+ select HAVE_KVM if !TILEGX
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_FIND_NEXT_BIT
+ select USE_GENERIC_SMP_HELPERS
+ select CC_OPTIMIZE_FOR_SIZE
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select GENERIC_PENDING_IRQ if SMP
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
+# FIXME: investigate whether we need/want these options.
+# select HAVE_IOREMAP_PROT
+# select HAVE_OPTPROBES
+# select HAVE_REGS_AND_STACK_ACCESS_API
+# select HAVE_HW_BREAKPOINT
+# select PERF_EVENTS
+# select HAVE_USER_RETURN_NOTIFIER
+# config NO_BOOTMEM
+# config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+# config HUGETLB_PAGE_SIZE_VARIABLE
-config GENERIC_IRQ_PROBE
+config MMU
def_bool y
-config GENERIC_PENDING_IRQ
+config GENERIC_CSUM
def_bool y
- depends on GENERIC_HARDIRQS && SMP
config SEMAPHORE_SLEEPERS
def_bool y
@@ -97,26 +106,6 @@ config HVC_TILE
select HVC_DRIVER
def_bool y
-config TILE
- def_bool y
- select HAVE_KVM if !TILEGX
- select GENERIC_FIND_FIRST_BIT
- select GENERIC_FIND_NEXT_BIT
- select USE_GENERIC_SMP_HELPERS
- select CC_OPTIMIZE_FOR_SIZE
-
-# FIXME: investigate whether we need/want these options.
-# select HAVE_IOREMAP_PROT
-# select HAVE_OPTPROBES
-# select HAVE_REGS_AND_STACK_ACCESS_API
-# select HAVE_HW_BREAKPOINT
-# select PERF_EVENTS
-# select HAVE_USER_RETURN_NOTIFIER
-# config NO_BOOTMEM
-# config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-# config HUGETLB_PAGE_SIZE_VARIABLE
-
-
# Please note: TILE-Gx support is not yet finalized; this is
# the preliminary support. TILE-Gx drivers are only provided
# with the alpha or beta test versions for Tilera customers.
@@ -220,7 +209,7 @@ config FORCE_MAX_ZONEORDER
choice
depends on !TILEGX
- prompt "Memory split" if EMBEDDED
+ prompt "Memory split" if EXPERT
default VMSPLIT_3G
---help---
Select the desired split between kernel and user memory.
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index a81f0fbf7e60..9bc161a02c71 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -3,7 +3,7 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
config EARLY_PRINTK
- bool "Early printk" if EMBEDDED && DEBUG_KERNEL
+ bool "Early printk" if EXPERT && DEBUG_KERNEL
default y
help
Write kernel log output directly via the hypervisor console.
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
index 919c54afd981..0fe54445fda5 100644
--- a/arch/tile/configs/tile_defconfig
+++ b/arch/tile/configs/tile_defconfig
@@ -3,7 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 049d048b070d..e351e14b4339 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,14 +3,10 @@ config DEFCONFIG_LIST
option defconfig_list
default "arch/$ARCH/defconfig"
-# UML uses the generic IRQ subsystem
-config GENERIC_HARDIRQS
- bool
- default y
-
config UML
bool
default y
+ select HAVE_GENERIC_HARDIRQS
config MMU
bool
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index f8d1d0d47fe6..90a438acbfaf 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -120,9 +120,6 @@ config SMP
If you don't know what to do, say N.
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 564f3de65b4a..9f7634f08cf3 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -133,7 +133,7 @@ CONFIG_SYSFS_DEPRECATED=y
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
CONFIG_UID16=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ed5ad92b029..d5ed94d30aad 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -627,11 +627,11 @@ config APB_TIMER
as it is off-chip. APB timers are always running regardless of CPU
C states, they are used as per CPU clockevent device when possible.
-# Mark as embedded because too many people got it wrong.
+# Mark as expert because too many people got it wrong.
# The code disables itself when not needed.
config DMI
default y
- bool "Enable DMI scanning" if EMBEDDED
+ bool "Enable DMI scanning" if EXPERT
---help---
Enabled scanning of DMI to identify machine quirks. Say Y
here unless you have verified that your setup is not
@@ -639,7 +639,7 @@ config DMI
BIOS code.
config GART_IOMMU
- bool "GART IOMMU support" if EMBEDDED
+ bool "GART IOMMU support" if EXPERT
default y
select SWIOTLB
depends on X86_64 && PCI && AMD_NB
@@ -889,7 +889,7 @@ config X86_THERMAL_VECTOR
depends on X86_MCE_INTEL
config VM86
- bool "Enable VM86 support" if EMBEDDED
+ bool "Enable VM86 support" if EXPERT
default y
depends on X86_32
---help---
@@ -1073,7 +1073,7 @@ endchoice
choice
depends on EXPERIMENTAL
- prompt "Memory split" if EMBEDDED
+ prompt "Memory split" if EXPERT
default VMSPLIT_3G
depends on X86_32
---help---
@@ -1135,7 +1135,7 @@ config ARCH_DMA_ADDR_T_64BIT
def_bool X86_64 || HIGHMEM64G
config DIRECT_GBPAGES
- bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+ bool "Enable 1GB pages for kernel pagetables" if EXPERT
default y
depends on X86_64
---help---
@@ -1369,7 +1369,7 @@ config MATH_EMULATION
config MTRR
def_bool y
- prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
+ prompt "MTRR (Memory Type Range Register) support" if EXPERT
---help---
On Intel P6 family processors (Pentium Pro, Pentium II and later)
the Memory Type Range Registers (MTRRs) may be used to control
@@ -1435,7 +1435,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
config X86_PAT
def_bool y
- prompt "x86 PAT support" if EMBEDDED
+ prompt "x86 PAT support" if EXPERT
depends on MTRR
---help---
Use PAT attributes to setup page level cache control.
@@ -1539,7 +1539,7 @@ config KEXEC_JUMP
code in physical address mode via KEXEC
config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
default "0x1000000"
---help---
This gives the physical address where the kernel is loaded.
@@ -1934,7 +1934,7 @@ config PCI_MMCONFIG
depends on X86_64 && PCI && ACPI
config PCI_CNB20LE_QUIRK
- bool "Read CNB20LE Host Bridge Windows" if EMBEDDED
+ bool "Read CNB20LE Host Bridge Windows" if EXPERT
default n
depends on PCI && EXPERIMENTAL
help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 15588a0ef466..283c5a6a03a6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -424,7 +424,7 @@ config X86_DEBUGCTLMSR
depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
menuconfig PROCESSOR_SELECT
- bool "Supported processor vendors" if EMBEDDED
+ bool "Supported processor vendors" if EXPERT
---help---
This lets you choose what x86 vendor support code your kernel
will include.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 45143bbcfe5e..615e18810f48 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -31,7 +31,7 @@ config X86_VERBOSE_BOOTUP
see errors. Disable this if you want silent bootup.
config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
+ bool "Early printk" if EXPERT
default y
---help---
Write kernel log output directly into the VGA buffer or to a serial
@@ -138,7 +138,7 @@ config DEBUG_NX_TEST
config DOUBLEFAULT
default y
- bool "Enable doublefault exception handler" if EMBEDDED
+ bool "Enable doublefault exception handler" if EXPERT
depends on X86_32
---help---
This option allows trapping of rare doublefault exceptions that
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec9075c..62f084478f7e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
/* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
- unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page,
- unsigned long addr,
- unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
- unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, const void *src,
- unsigned long len)
-{
- memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, const void *src,
- unsigned long len)
-{
- memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
#ifdef CONFIG_X86_PAT
/*
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4fab24de26b1..6e6e7558e702 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
DECLARE_PER_CPU(int, cpu_state);
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *);
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index f52d42e80585..574dbc22893a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
do { \
asm goto("1:" \
JUMP_LABEL_INITIAL_NOP \
- ".pushsection __jump_table, \"a\" \n\t"\
+ ".pushsection __jump_table, \"aw\" \n\t"\
_ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
".popsection \n\t" \
: : "i" (key) : : label); \
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a37229011b56..b0ef2b449a9d 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_NUMA_32_H
#define _ASM_X86_NUMA_32_H
+extern int numa_off;
+
extern int pxm_to_nid(int pxm);
extern void numa_remove_cpu(int cpu);
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 5ae87285a502..0493be39607c 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -40,6 +40,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */
#else
static inline void init_cpu_to_node(void) { }
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee45167e817..3788f4649db4 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,8 +414,6 @@ do { \
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
@@ -432,8 +430,6 @@ do { \
#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
@@ -475,11 +471,15 @@ do { \
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#endif
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98deaae..ec2c19a7b8ef 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
{ 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
{ 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
{ 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
+ { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
{ 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
{ 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
{ 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
{ 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
{ 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
+ { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
{ 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
{ 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
{ 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
{ 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
{ 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
{ 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
+ { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
{ 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
{ 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
{ 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246ff5aa6..6f8c5e9da97f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@ struct thermal_state {
/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
static DEFINE_PER_CPU(struct thermal_state, thermal_state);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d8286ed54ffa..e764fc05d700 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
@@ -505,7 +506,7 @@ static void poll_idle(void)
#define MWAIT_ECX_EXTENDED_INFO 0x01
#define MWAIT_EDX_C1 0xf0
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763df77343dd..0cbe8c0b35ed 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1402,8 +1402,9 @@ static inline void mwait_play_dead(void)
unsigned int highest_subcstate = 0;
int i;
void *mwait_ptr;
+ struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
- if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+ if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
return;
if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
return;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b34ab80fddd5..bf4700755184 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -34,9 +34,11 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
+jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
+jiffies_64 = jiffies;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -140,15 +142,6 @@ SECTIONS
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
DATA_DATA
- /*
- * Workaround a binutils (2.20.51.0.12 to 2.21.51.0.3) bug.
- * This makes jiffies relocatable in such binutils
- */
-#ifdef CONFIG_X86_32
- jiffies = jiffies_64;
-#else
- jiffies_64 = jiffies;
-#endif
CONSTRUCTORS
/* rarely changed data like cpu maps */
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 38718041efc3..6e121a2a49e1 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
bool "Lguest guest support"
select PARAVIRT
depends on X86_32
+ select VIRTUALIZATION
select VIRTIO
select VIRTIO_RING
select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4996cf5f73a0..eba687f0cc0c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -824,7 +824,7 @@ static void __init lguest_init_IRQ(void)
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* Some systems map "vectors" to interrupts weirdly. Not us! */
- __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+ __this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
}
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 787c52ca49c3..ebf6d7887a38 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,28 @@
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
+#include <asm/numa.h>
+#include <asm/acpi.h>
+
+int __initdata numa_off;
+
+static __init int numa_setup(char *opt)
+{
+ if (!opt)
+ return -EINVAL;
+ if (!strncmp(opt, "off", 3))
+ numa_off = 1;
+#ifdef CONFIG_NUMA_EMU
+ if (!strncmp(opt, "fake=", 5))
+ numa_emu_cmdline(opt + 5);
+#endif
+#ifdef CONFIG_ACPI_NUMA
+ if (!strncmp(opt, "noacpi", 6))
+ acpi_numa = -1;
+#endif
+ return 0;
+}
+early_param("numa", numa_setup);
/*
* Which logical CPUs are on which nodes
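The relocated numa_setup() above recognises three forms on the kernel command line: numa=off, numa=fake=<spec> and numa=noacpi. The prefix dispatch can be lifted out and compiled on its own for a quick check — ordinary C, where numa_off_flag, fake_spec, acpi_numa_flag and parse_numa_opt are stand-in names, and the real handlers are the ones shown in the hunk:

#include <string.h>

static int numa_off_flag;
static const char *fake_spec;
static int acpi_numa_flag;

/* Mirror of the strncmp-based dispatch in numa_setup(). */
static int parse_numa_opt(const char *opt)
{
	if (!opt)
		return -1;
	if (!strncmp(opt, "off", 3))
		numa_off_flag = 1;
	if (!strncmp(opt, "fake=", 5))
		fake_spec = opt + 5;     /* e.g. "numa=fake=4" on the command line */
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa_flag = -1;
	return 0;
}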
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 1e72102e80c9..95ea1551eebc 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -30,7 +30,6 @@ s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
-int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;
@@ -263,6 +262,11 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;
+void __init numa_emu_cmdline(char *str)
+{
+ cmdline = str;
+}
+
static int __init setup_physnodes(unsigned long start, unsigned long end,
int acpi, int amd)
{
@@ -670,24 +674,6 @@ unsigned long __init numa_free_all_bootmem(void)
return pages;
}
-static __init int numa_setup(char *opt)
-{
- if (!opt)
- return -EINVAL;
- if (!strncmp(opt, "off", 3))
- numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
- if (!strncmp(opt, "fake=", 5))
- cmdline = opt + 5;
-#endif
-#ifdef CONFIG_ACPI_NUMA
- if (!strncmp(opt, "noacpi", 6))
- acpi_numa = -1;
-#endif
- return 0;
-}
-early_param("numa", numa_setup);
-
#ifdef CONFIG_NUMA
static __init int find_near_online_node(int node)
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index f16434568a51..ae96e7b8051d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -59,7 +59,6 @@ static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
static int __initdata num_memory_chunks; /* total number of memory chunks */
static u8 __initdata apicid_to_pxm[MAX_APICID];
-int numa_off __initdata;
int acpi_numa __initdata;
static __init void bad_srat(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7e8d3bc80af6..50542efe45fb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1194,7 +1194,7 @@ asmlinkage void __init xen_start_kernel(void)
per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
local_irq_disable();
- early_boot_irqs_off();
+ early_boot_irqs_disabled = true;
memblock_init();
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9d30105a0c4a..6a6fe8939645 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -126,7 +126,7 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
#endif
};
-void __init xen_init_irq_ops()
+void __init xen_init_irq_ops(void)
{
pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8f2251d2a3f8..ddc81a06edb9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -237,7 +237,25 @@ void __init xen_build_dynamic_phys_to_machine(void)
p2m_top[topidx] = mid;
}
- p2m_top[topidx][mididx] = &mfn_list[pfn];
+ /*
+ * As long as the mfn_list has enough entries to completely
+ * fill a p2m page, pointing into the array is fine. But if
+ * not, the entries beyond the last pfn will be undefined.
+ * Since we cannot assume that whatever follows the array will
+ * tolerate being overwritten with invalid markers, allocate a
+ * new page, initialize it and fill it with the valid part.
+ */
+ if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
+ unsigned long p2midx;
+ unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_init(p2m);
+
+ for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
+ p2m[p2midx] = mfn_list[pfn + p2midx];
+ }
+ p2m_top[topidx][mididx] = p2m;
+ } else
+ p2m_top[topidx][mididx] = &mfn_list[pfn];
}
m2p_override_init();
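The new branch above handles the case where mfn_list ends partway through a p2m page. The same boundary logic, stripped of the Xen specifics — plain C, where PAGE_ENTRIES, INVALID_ENTRY, page_for and the malloc call are illustrative stand-ins for P2M_PER_PAGE, the invalid-entry marker, and the extend_brk()/p2m_init() allocation — looks like:

#include <stdlib.h>

#define PAGE_ENTRIES 512            /* entries per p2m page (stand-in value) */
#define INVALID_ENTRY (~0UL)

/*
 * If the source array ends inside the page starting at 'pfn', build a
 * private page: valid entries are copied, the rest are explicit invalid
 * markers. Otherwise it is safe to point straight into the source array.
 */
static unsigned long *page_for(unsigned long *mfn_list, unsigned long max_pfn,
			       unsigned long pfn)
{
	unsigned long i;
	unsigned long *page;

	if (pfn + PAGE_ENTRIES <= max_pfn)
		return &mfn_list[pfn];  /* page fully covered: share the array */

	page = malloc(PAGE_ENTRIES * sizeof(*page));
	if (!page)
		return NULL;
	for (i = 0; i < PAGE_ENTRIES; i++)
		page[i] = (pfn + i < max_pfn) ? mfn_list[pfn + i] : INVALID_ENTRY;
	return page;
}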
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index 1d230ee081b4..b90038e40dd3 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -32,7 +32,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_HOTPLUG is not set
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 7368164843b9..0234cd198c54 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -55,7 +55,7 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index bb84fbc9921f..095cd8084164 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -55,7 +55,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
diff --git a/block/Kconfig b/block/Kconfig
index 6c9213ef15a1..60be1e0455da 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -2,7 +2,7 @@
# Block layer core configuration
#
menuconfig BLOCK
- bool "Enable the block layer" if EMBEDDED
+ bool "Enable the block layer" if EXPERT
default y
help
Provide block layer support for the kernel.
diff --git a/drivers/Makefile b/drivers/Makefile
index 7eb35f479461..b423bb16c3a8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
obj-$(CONFIG_REGULATOR) += regulator/
-# char/ comes before serial/ etc so that the VT console is the boot-time
+# tty/ comes before char/ so that the VT console is the boot-time
# default.
obj-y += tty/
obj-y += char/
@@ -38,7 +38,6 @@ obj-$(CONFIG_CONNECTOR) += connector/
obj-$(CONFIG_FB_I810) += video/i810/
obj-$(CONFIG_FB_INTEL) += video/intelfb/
-obj-y += serial/
obj-$(CONFIG_PARPORT) += parport/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_NUBUS) += nubus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 10c7ad59c0e1..2aa042a5da6d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -318,7 +318,7 @@ config ACPI_PCI_SLOT
the module will be called pci_slot.
config X86_PM_TIMER
- bool "Power Management Timer Support" if EMBEDDED
+ bool "Power Management Timer Support" if EXPERT
depends on X86
default y
help
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 3e50c74ed4a1..e0ba17f0a7c8 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index b17d8de9f6ff..ab87396c2c07 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 72e9d5eb083c..eb0b1f8dee6d 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 894a0ff2a946..666271b65418 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 70e0b28801aa..41d247daf461 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 0e4dba0d0325..82a1bd283db8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 258d628793ea..e7213beaafc7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 049e203bd621..3731e1c34b83 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 74000f5b7dab..54784bb42cec 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 8d5c9e0a495f..b7491ee1fba6 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index d44d3bc5b847..79a598c67fe3 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 962a3ccff6fd..1055769f2f01 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,8 +97,6 @@
#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */
#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */
#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */
-#define AOPOBJ_MODULE_LEVEL 0x40 /* Method is actually module-level code */
-#define AOPOBJ_MODIFIED_NAMESPACE 0x80 /* Method modified the namespace */
/******************************************************************************
*
@@ -175,7 +173,7 @@ struct acpi_object_region {
};
struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+ ACPI_OBJECT_COMMON_HEADER u8 info_flags;
u8 param_count;
u8 sync_level;
union acpi_operand_object *mutex;
@@ -183,13 +181,21 @@ struct acpi_object_method {
union {
ACPI_INTERNAL_METHOD implementation;
union acpi_operand_object *handler;
- } extra;
+ } dispatch;
u32 aml_length;
u8 thread_count;
acpi_owner_id owner_id;
};
+/* Flags for info_flags field above */
+
+#define ACPI_METHOD_MODULE_LEVEL 0x01 /* Method is actually module-level code */
+#define ACPI_METHOD_INTERNAL_ONLY 0x02 /* Method is implemented internally (_OSI) */
+#define ACPI_METHOD_SERIALIZED 0x04 /* Method is serialized */
+#define ACPI_METHOD_SERIALIZED_PENDING 0x08 /* Method is to be marked serialized */
+#define ACPI_METHOD_MODIFIED_NAMESPACE 0x10 /* Method modified the namespace */
+
/******************************************************************************
*
* Objects that can be notified. All share a common notify_info area.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8c15ff43f42b..bb2ccfad7376 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index d0bb0fd3e57a..5ea1e06afa20 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 10998d369ad0..94e73c97cf85 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 528bcbaf4ce7..f08b55b7f3a0 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6e5dd97949fe..1623b245dde2 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 62a576e34361..967f08124eba 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 72e4183c1937..99c140d8e348 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1f484ba228fc..f4f0998d3967 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -480,16 +480,10 @@ typedef enum {
AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
} AML_ACCESS_ATTRIBUTE;
-/* Bit fields in method_flags byte */
+/* Bit fields in the AML method_flags byte */
#define AML_METHOD_ARG_COUNT 0x07
#define AML_METHOD_SERIALIZED 0x08
#define AML_METHOD_SYNC_LEVEL 0xF0
-/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
-
-#define AML_METHOD_INTERNAL_ONLY 0x01
-#define AML_METHOD_RESERVED1 0x02
-#define AML_METHOD_RESERVED2 0x04
-
#endif /* __AMLCODE_H__ */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 0e5798fcbb19..59122cde247c 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 347bee1726f1..34be60c0e448 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index cc4a38c57558..a7718bf2b9a1 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d94dd8974b55..5d797751e205 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
@@ -201,7 +200,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
/*
* If this method is serialized, we need to acquire the method mutex.
*/
- if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
/*
* Create a mutex for the method if it is defined to be Serialized
* and a mutex has not already been created. We defer the mutex creation
@@ -413,8 +412,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
/* Invoke an internal method if necessary */
- if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = obj_desc->method.extra.implementation(next_walk_state);
+ if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
+ status =
+ obj_desc->method.dispatch.implementation(next_walk_state);
if (status == AE_OK) {
status = AE_CTRL_TERMINATE;
}
@@ -579,11 +579,14 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
/*
* Delete any namespace objects created anywhere within the
- * namespace by the execution of this method. Unless this method
- * is a module-level executable code method, in which case we
- * want make the objects permanent.
+ * namespace by the execution of this method. Unless:
+ * 1) This method is a module-level executable code method, in which
+ * case we want to make the objects permanent.
+ * 2) There are other threads executing the method, in which case we
+ * will wait until the last thread has completed.
*/
- if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
+ && (method_desc->method.thread_count == 1)) {
/* Delete any direct children of (created by) this method */
@@ -593,12 +596,17 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
/*
* Delete any objects that were created by this method
* elsewhere in the namespace (if any were created).
+ * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
+ * deletion such that we don't have to perform an entire
+ * namespace walk for every control method execution.
*/
if (method_desc->method.
- flags & AOPOBJ_MODIFIED_NAMESPACE) {
+ info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
acpi_ns_delete_namespace_by_owner(method_desc->
method.
owner_id);
+ method_desc->method.info_flags &=
+ ~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
}
@@ -629,19 +637,43 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* Serialized if it appears that the method is incorrectly written and
* does not support multiple thread execution. The best example of this
* is if such a method creates namespace objects and blocks. A second
- * thread will fail with an AE_ALREADY_EXISTS exception
+ * thread will fail with an AE_ALREADY_EXISTS exception.
*
* This code is here because we must wait until the last thread exits
- * before creating the synchronization semaphore.
+ * before marking the method as serialized.
*/
- if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
- && (!method_desc->method.mutex)) {
- (void)acpi_ds_create_method_mutex(method_desc);
+ if (method_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
+ if (walk_state) {
+ ACPI_INFO((AE_INFO,
+ "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+ walk_state->method_node->name.
+ ascii));
+ }
+
+ /*
+ * Method tried to create an object twice and was marked as
+ * "pending serialized". The probable cause is that the method
+ * cannot handle reentrancy.
+ *
+ * The method was created as not_serialized, but it tried to create
+ * a named object and then blocked, causing the second thread
+ * entrance to begin and then fail. Work around this problem by
+ * marking the method permanently as Serialized when the last
+ * thread exits here.
+ */
+ method_desc->method.info_flags &=
+ ~ACPI_METHOD_SERIALIZED_PENDING;
+ method_desc->method.info_flags |=
+ ACPI_METHOD_SERIALIZED;
+ method_desc->method.sync_level = 0;
}
/* No more threads, we can free the owner_id */
- if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ if (!
+ (method_desc->method.
+ info_flags & ACPI_METHOD_MODULE_LEVEL)) {
acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
}
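The block above flips a method from "pending serialized" to permanently Serialized once its last thread exits. The flag bookkeeping on its own, reusing the ACPI_METHOD_* bit values introduced earlier in this patch — struct method_info and finish_serialization() are simplified stand-ins, not ACPICA's acpi_object_method — can be sketched as:

#define ACPI_METHOD_SERIALIZED          0x04
#define ACPI_METHOD_SERIALIZED_PENDING  0x08

struct method_info {
	unsigned char info_flags;
	unsigned char sync_level;
	unsigned int  thread_count;
};

/* Called when the last thread leaves the method (thread_count just hit 0). */
static void finish_serialization(struct method_info *m)
{
	if (!(m->info_flags & ACPI_METHOD_SERIALIZED_PENDING))
		return;

	/* The method could not handle reentrancy: make it Serialized for good. */
	m->info_flags &= ~ACPI_METHOD_SERIALIZED_PENDING;
	m->info_flags |= ACPI_METHOD_SERIALIZED;
	m->sync_level = 0;
}

Resetting sync_level to 0 matches the hunk: a method that was never declared Serialized in the AML has no sync level of its own to preserve.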
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8095306fcd8c..905ce29a92e1 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8e85f54a8e0e..f42e17e5c252 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 7c0e74227171..bbecf293aeeb 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 15135c25aa9b..2c477ce172fa 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b0b5d08d97a..fe40e4c6554f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 140a9d002959..52566ff5e903 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d1e701709dac..76a661fc1e09 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 83155dd8671e..a6c374ef9914 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index e5e313c663a5..d458b041e651 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 7c339d34ab42..14988a86066f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -471,6 +471,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
@@ -478,6 +479,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9acb86958c09..ca2c41a53311 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index c59dc2340593..ce9aa9f9a972 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 10e477494dcf..80a81d0c4a80 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 38bba66fcce5..7dc80946f7bd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 98fd210e87b2..785a5ee64585 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47a6dc9290..9659cee6093e 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -590,9 +590,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
* See acpi_ns_exec_module_code
*/
if (obj_desc->method.
- flags & AOPOBJ_MODULE_LEVEL) {
+ info_flags & ACPI_METHOD_MODULE_LEVEL) {
handler_obj =
- obj_desc->method.extra.handler;
+ obj_desc->method.dispatch.handler;
}
break;
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8dfbaa96e422..2ebd40e1a3ef 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 1226689bdb1b..e1141402dbed 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 90488c1e0f3d..c57b5c707a77 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 416845bc9c1f..e9562a7cb2f9 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index ce9314f79451..eb7386763712 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 18832205b631..745a42b401f5 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index b73bc50c5b76..74162a11817d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c61b48c73f5..e7b372d17667 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -482,13 +482,11 @@ acpi_ex_create_method(u8 * aml_start,
obj_desc->method.aml_length = aml_length;
/*
- * Disassemble the method flags. Split off the Arg Count
- * for efficiency
+ * Disassemble the method flags. Split off the arg_count, Serialized
+ * flag, and sync_level for efficiency.
*/
method_flags = (u8) operand[1]->integer.value;
- obj_desc->method.method_flags =
- (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
obj_desc->method.param_count =
(u8) (method_flags & AML_METHOD_ARG_COUNT);
@@ -497,6 +495,8 @@ acpi_ex_create_method(u8 * aml_start,
* created for this method when it is parsed.
*/
if (method_flags & AML_METHOD_SERIALIZED) {
+ obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
+
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
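
acpi_ex_create_method() here and acpi_install_method() later in this patch decode the raw AML MethodFlags byte the same way: the argument count comes from the low bits, the Serialized flag from bit 3, and the sync level from the high nibble. A condensed, self-contained sketch of that decode; the mask values are assumptions inferred from the masks and the >> 4 shift shown in the hunks:

/* Assumed AML MethodFlags encoding: ArgCount in bits 0-2,
 * Serialized in bit 3, SyncLevel in bits 4-7. */
#define AML_METHOD_ARG_COUNT_SKETCH    0x07
#define AML_METHOD_SERIALIZED_SKETCH   0x08
#define AML_METHOD_SYNC_LEVEL_SKETCH   0xF0

/* Returns the parameter count; reports the Serialized flag and sync level. */
static unsigned int decode_method_flags(unsigned char method_flags,
					int *serialized,
					unsigned int *sync_level)
{
	*serialized = (method_flags & AML_METHOD_SERIALIZED_SKETCH) != 0;
	*sync_level = *serialized ?
		(method_flags & AML_METHOD_SYNC_LEVEL_SKETCH) >> 4 : 0;
	return method_flags & AML_METHOD_ARG_COUNT_SKETCH;
}
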
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index be8c98b480d7..c7a2f1edd282 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f067bbb0d961..61b8c0e8b74d 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
static struct acpi_exdump_info acpi_ex_dump_method[9] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
"Parameter Count"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index f17d2ff0031b..0bde2230c028 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 38293fd3e088..6c79c29f082d 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 95db4be0877b..703d88ed0b3d 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 6af14e43f839..be1c56ead653 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index d11e539ef763..49ec049c157e 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 84e4d185aa25..236ead14b7f7 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 10e104cf0fb9..2571b4a310f4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 7a08d23befcd..1b48d9d28c9a 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4b50730cf9a0..f4a2787e8e92 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 7aae29f73d3f..cc95e2000406 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index de17e10da0ed..f0d5e14f1f2c 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 1fa4289a687e..55997e46948b 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7ca35ea8acea..db502cd7d934 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 8c97cfd6a0fd..e3bb00ccdff5 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 1624436ba4c5..c0c8842dd344 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index d4af684620ca..a979017d56b8 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index e972b667b09b..dc665cc554de 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 675aaa91a770..df66e7b686be 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4093522eed45..8ad93146dd32 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index b44274a0b62c..fc380d3d45ab 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 85c3cbd4304d..f610d88a66be 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index ad21c7d8bf4f..050fd227951b 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5d1273b660ae..55accb7018bb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3796811276ac..2ac28bbe8827 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 1ef8e0bb250b..9c8eb71a12fb 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e1d9c777b213..5f1605874655 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 50cc3be77724..6f98d210e71c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 0cd925be5fc1..d93172fd15a8 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -163,9 +163,9 @@ acpi_status acpi_ns_root_initialize(void)
#else
/* Mark this as a very SPECIAL method */
- obj_desc->method.method_flags =
- AML_METHOD_INTERNAL_ONLY;
- obj_desc->method.extra.implementation =
+ obj_desc->method.info_flags =
+ ACPI_METHOD_INTERNAL_ONLY;
+ obj_desc->method.dispatch.implementation =
acpi_ut_osi_implementation;
#endif
break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1e5ff803d9ad..1d0ef15d158f 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,8 +234,8 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
* modified the namespace. This is used for cleanup when the
* method exits.
*/
- walk_state->method_desc->method.flags |=
- AOPOBJ_MODIFIED_NAMESPACE;
+ walk_state->method_desc->method.info_flags |=
+ ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
@@ -341,6 +341,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node = NULL;
u32 level = 1;
+ acpi_status status;
ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
@@ -348,6 +349,13 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
return_VOID;
}
+ /* Lock namespace for possible update */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
/*
* Traverse the tree of objects until we bubble back up
* to where we started.
@@ -397,6 +405,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
}
}
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index a54dc39e304b..b683cc2ff9d3 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -624,9 +624,22 @@ acpi_ns_dump_objects(acpi_object_type type,
acpi_owner_id owner_id, acpi_handle start_handle)
{
struct acpi_walk_info info;
+ acpi_status status;
ACPI_FUNCTION_ENTRY();
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes since we are going to display
+ * them also.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not acquire namespace mutex\n");
+ return;
+ }
+
info.debug_level = ACPI_LV_TABLES;
info.owner_id = owner_id;
info.display_type = display_type;
@@ -636,6 +649,8 @@ acpi_ns_dump_objects(acpi_object_type type,
ACPI_NS_WALK_TEMP_NODES,
acpi_ns_dump_one_object, NULL,
(void *)&info, NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
#endif /* ACPI_FUTURE_USAGE */
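
This hunk and the acpi_ns_delete_namespace_subtree() change above add the same guard: take ACPI_MTX_NAMESPACE for the whole walk, bail out quietly if the mutex cannot be acquired, and release it unconditionally afterwards. A stripped-down sketch of that pattern, assuming a caller-supplied walk callback:

/* Sketch of the lock-walk-unlock pattern added above. acpi_ut_acquire_mutex,
 * acpi_ut_release_mutex and ACPI_MTX_NAMESPACE are the names used in the hunks;
 * the callback wrapper is an illustration. */
static void walk_namespace_locked(void (*walk)(void *context), void *context)
{
	acpi_status status;

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status))
		return;                 /* same silent bail-out as the hunks */

	walk(context);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
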
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index d2a97921e249..2ed294b7a4db 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f52829cc294b..c1bd02b1a058 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -389,7 +389,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
* acpi_gbl_root_node->Object is NULL at PASS1.
*/
if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
- method_obj->method.extra.handler =
+ method_obj->method.dispatch.handler =
parent_node->object->device.handler;
}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 0cac7ec0d2ec..fd7c6380e294 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index df18be94fefe..5f7dc691c183 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d3104af57e13..d5fa520c3de5 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 41a9213dd5af..3bb8bf105ea2 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 5808c89e9fac..b3234fa795b8 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7096bcda0c72..9fb03fa8ffde 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d1c136692667..1d76ac85b5e7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4ef9f43ea926..973883babee1 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 41102a84272f..28b0d7a62b99 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a7d6ad9c111b..cb1b104a69a2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 2cd5be8fe10f..345f0c3c6ad2 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index ebef8a7fd707..c53f0040e490 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b01e45a415e3..3fd4526f3dba 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -603,10 +603,9 @@ acpi_status acpi_install_method(u8 *buffer)
method_obj->method.param_count = (u8)
(method_flags & AML_METHOD_ARG_COUNT);
- method_obj->method.method_flags = (u8)
- (method_flags & ~AML_METHOD_ARG_COUNT);
-
if (method_flags & AML_METHOD_SERIALIZED) {
+ method_obj->method.info_flags = ACPI_METHOD_SERIALIZED;
+
method_obj->method.sync_level = (u8)
((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index a1f04e9b8030..db7660f8b869 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 7df1a4c95274..e1fad0ee0136 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 2f2e7760938c..01dd70d1de51 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -655,7 +655,7 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
method_obj->method.aml_start = aml_start;
method_obj->method.aml_length = aml_length;
method_obj->method.owner_id = owner_id;
- method_obj->method.flags |= AOPOBJ_MODULE_LEVEL;
+ method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
/*
* Save the parent node in next_object. This is cheating, but we
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 2b0c3be2b1b8..bed08de7528c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8d81542194d4..9bb0cbd37b5e 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,6 @@
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
-#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_PARSER
@@ -539,24 +538,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
/* Check for possible multi-thread reentrancy problem */
if ((status == AE_ALREADY_EXISTS) &&
- (!walk_state->method_desc->method.mutex)) {
- ACPI_INFO((AE_INFO,
- "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
- walk_state->method_node->name.
- ascii));
-
+ (!(walk_state->method_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED))) {
/*
- * Method tried to create an object twice. The probable cause is
- * that the method cannot handle reentrancy.
- *
- * The method is marked not_serialized, but it tried to create
- * a named object, causing the second thread entrance to fail.
- * Workaround this problem by marking the method permanently
- * as Serialized.
+ * Method is not serialized and tried to create an object
+ * twice. The probable cause is that the method cannot
+ * handle reentrancy. Mark as "pending serialized" now, and
+ * then mark "serialized" when the last thread exits.
*/
- walk_state->method_desc->method.method_flags |=
- AML_METHOD_SERIALIZED;
- walk_state->method_desc->method.sync_level = 0;
+ walk_state->method_desc->method.info_flags |=
+ ACPI_METHOD_SERIALIZED_PENDING;
}
}
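
The new comment describes a two-step workaround: a non-serialized method that hits AE_ALREADY_EXISTS is only marked "pending serialized" here, and the promotion to a truly serialized method is deferred until the last thread leaves it. A minimal sketch of that state transition; the bit values and the promotion helper are assumptions, only the flag names come from the hunk:

#define ACPI_METHOD_SERIALIZED_SKETCH          0x04
#define ACPI_METHOD_SERIALIZED_PENDING_SKETCH  0x08

/* On AE_ALREADY_EXISTS from a method that is not serialized: */
static void mark_pending_serialized(unsigned char *info_flags)
{
	*info_flags |= ACPI_METHOD_SERIALIZED_PENDING_SKETCH;
}

/* Presumed cleanup step when the last thread exits the method: promote the
 * pending bit so every future invocation is serialized from the start. */
static void promote_pending_serialization(unsigned char *info_flags,
					  unsigned char thread_count)
{
	if (thread_count == 0 &&
	    (*info_flags & ACPI_METHOD_SERIALIZED_PENDING_SKETCH)) {
		*info_flags &= ~ACPI_METHOD_SERIALIZED_PENDING_SKETCH;
		*info_flags |= ACPI_METHOD_SERIALIZED_SKETCH;
	}
}
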
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 40e2b279ea12..a5faa1323a02 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index d4b970c3630b..f1464c03aa42 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index fe29eee5adb1..7eda78503422 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8abb9629443d..3312d6368bf1 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c42f067cff9d..8086805d4494 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "actables.h"
-#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psxface")
@@ -285,15 +284,15 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
goto cleanup;
}
- if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
+ if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
}
/* Invoke an internal method if necessary */
- if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+ if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
status =
- info->obj_desc->method.extra.implementation(walk_state);
+ info->obj_desc->method.dispatch.implementation(walk_state);
info->return_object = walk_state->return_desc;
/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 226c806ae986..9e66f9078426 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index d6ebf7ec622d..3a8a89ec2ca4 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index c80a2eea3a01..4ce6e1147e80 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f859b0386fe4..33db7520c74b 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 1fd868b964fd..f9ea60872aa4 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 33bff17c0bbc..0c7efef008be 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 545da40d7fa7..50b8ad211167 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 7335f22aac20..1bfcef736c50 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 887b8ba8c432..7cc6d8625f1e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index f8cd9e87d987..410264b22a29 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 491191e6cf69..231811e56939 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 9f6a6e7e1c8e..2ff657a28f26 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index d2ff4325c427..428d44e2d162 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 989d5c867864..a55cb2bb5abb 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 83d7af8d0905..48db0944ce4a 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 34f9c2bc5e1f..0f2d395feaba 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4a8b9e6ea57a..4b7085dfc683 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index fd2c07d1d3ac..7eb6c6cc1edf 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8f0896281567..0a697351cf69 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 6fef83f04bcd..aded299a2fa8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index f21c486929a5..a9bcd816dc29 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index ed794cd033ea..31f5a7832ef1 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 22f59ef604e0..18f73c9d10bc 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 508537f884ac..97dd9bbf055a 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index d2906328535d..b679ea693545 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c1b1c803ea9b..191b6828cce9 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b081cd46a15f..f6bb75c6faf5 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49cf7b7fd816..ce481da9bb45 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c7d0e05ef5a4..c33a852d4f42 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 199528ff7f1d..a946c689f03b 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index fd1fa2749ea5..188340a017b4 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 18c59a85fdca..1fb10cb8f11d 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 7965919000b1..84e051844247 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index d35d109b8da2..30c21e1a9360 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 1f484c9a6888..98ad125e14ff 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6f12e314fbae..916ae097c43c 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 68bc227e7c4c..ac1a599f5147 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -998,7 +998,6 @@ static int acpi_battery_resume(struct acpi_device *device)
if (!device)
return -EINVAL;
battery = acpi_driver_data(device);
- acpi_battery_refresh(battery);
battery->update_time = 0;
acpi_battery_update(battery);
return 0;
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 54b6ab8040a6..fa5a1df42b79 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <acpi/acpiosxf.h>
/*
@@ -80,7 +81,7 @@ void suspend_nvs_free(void)
free_page((unsigned long)entry->data);
entry->data = NULL;
if (entry->kaddr) {
- acpi_os_unmap_memory(entry->kaddr, entry->size);
+ iounmap(entry->kaddr);
entry->kaddr = NULL;
}
}
@@ -114,8 +115,8 @@ int suspend_nvs_save(void)
list_for_each_entry(entry, &nvs_list, node)
if (entry->data) {
- entry->kaddr = acpi_os_map_memory(entry->phys_start,
- entry->size);
+ entry->kaddr = acpi_os_ioremap(entry->phys_start,
+ entry->size);
if (!entry->kaddr) {
suspend_nvs_free();
return -ENOMEM;
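
suspend_nvs_save() now maps each NVS region with acpi_os_ioremap() and suspend_nvs_free() releases it with plain iounmap(), instead of going through the tracked acpi_os_map_memory()/acpi_os_unmap_memory() pair. A plausible definition of the new helper from <linux/acpi_io.h>, assuming it is a thin, architecture-overridable wrapper over ioremap_cache(); the osl.c hunk below switches its internal callers to the same name:

/* Assumed shape of acpi_os_ioremap(); not quoted from this patch. */
#include <linux/io.h>
#include <acpi/acpi.h>          /* acpi_physical_address, acpi_size */

static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
					    acpi_size size)
{
	return ioremap_cache(phys, size);
}
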
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e2dd6de5d50c..b0931818cf98 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -38,6 +38,7 @@
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
@@ -302,9 +303,10 @@ void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map, *tmp_map;
- unsigned long flags, pg_sz;
+ unsigned long flags;
void __iomem *virt;
- phys_addr_t pg_off;
+ acpi_physical_address pg_off;
+ acpi_size pg_sz;
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -320,7 +322,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = ioremap_cache(pg_off, pg_sz);
+ virt = acpi_os_ioremap(pg_off, pg_sz);
if (!virt) {
kfree(map);
return NULL;
@@ -642,7 +644,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
rcu_read_unlock();
if (!virt_addr) {
- virt_addr = ioremap_cache(phys_addr, size);
+ virt_addr = acpi_os_ioremap(phys_addr, size);
unmap = 1;
}
if (!value)
@@ -678,7 +680,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
rcu_read_unlock();
if (!virt_addr) {
- virt_addr = ioremap_cache(phys_addr, size);
+ virt_addr = acpi_os_ioremap(phys_addr, size);
unmap = 1;
}
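
acpi_os_map_memory() now widens the request to whole pages before handing it to acpi_os_ioremap(). A quick worked example of the round_down()/round_up() arithmetic in that hunk, with made-up addresses:

/*
 * phys = 0x7f123456, size = 0x20, PAGE_SIZE = 0x1000:
 *   pg_off = round_down(0x7f123456, 0x1000)                = 0x7f123000
 *   pg_sz  = round_up(0x7f123456 + 0x20, 0x1000) - pg_off
 *          = 0x7f124000 - 0x7f123000                       = 0x1000 (one page)
 * so the mapping covers the whole page containing the requested range, and
 * the returned pointer is presumably offset by (phys - pg_off) inside it.
 */
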
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fdd3aeeb6def..d6a8cd14de2e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -166,6 +166,7 @@ static void acpi_pm_finish(void)
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
+ suspend_nvs_free();
if (acpi_state == ACPI_STATE_S0)
return;
@@ -186,7 +187,6 @@ static void acpi_pm_finish(void)
*/
static void acpi_pm_end(void)
{
- suspend_nvs_free();
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c6b298d4c136..c2328aed0836 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -783,7 +783,7 @@ config PATA_PCMCIA
config PATA_PLATFORM
tristate "Generic platform device PATA support"
- depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
+ depends on EXPERT || PPC || HAVE_PATA_PLATFORM
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems.
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bca9cb89a118..487a54739854 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -151,7 +151,7 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
if (arg == NULL)
return 0;
- return copy_to_user(arg, &PRIV(dev)->stats,
+ return copy_to_user(arg, &stats,
sizeof(struct idt77105_stats)) ? -EFAULT : 0;
}
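The fetch_stats() fix reports from the snapshot taken while the lock was held instead of re-reading the live PRIV(dev)->stats afterwards. A user-space sketch of that snapshot-under-lock pattern; the struct is invented and a pthread mutex stands in for the driver's spinlock.

/* Illustrative sketch, not driver code. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct stats { long rx_errors; };

static struct stats live;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void fetch_stats(struct stats *out, int zero)
{
        pthread_mutex_lock(&lock);
        *out = live;                    /* snapshot */
        if (zero)
                memset(&live, 0, sizeof(live));
        pthread_mutex_unlock(&lock);
        /* report from the snapshot: the live counters may already be reset */
}

int main(void)
{
        struct stats snap;

        live.rx_errors = 42;
        fetch_stats(&snap, 1);
        printf("reported %ld, live now %ld\n", snap.rx_errors, live.rx_errors);
        return 0;
}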
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd96345bc35c..d57e8d0fb823 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,7 +70,7 @@ config PREVENT_FIRMWARE_BUILD
If unsure say Y here.
config FW_LOADER
- tristate "Userspace firmware loading support" if EMBEDDED
+ tristate "Userspace firmware loading support" if EXPERT
default y
---help---
This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0f175a866ef0..b7980a83ce2d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -5,7 +5,7 @@
menu "Character devices"
config VT
- bool "Virtual terminal" if EMBEDDED
+ bool "Virtual terminal" if EXPERT
depends on !S390
select INPUT
default y
@@ -39,13 +39,13 @@ config VT
config CONSOLE_TRANSLATIONS
depends on VT
default y
- bool "Enable character translations in console" if EMBEDDED
+ bool "Enable character translations in console" if EXPERT
---help---
This enables support for font mapping and Unicode translation
on virtual consoles.
config VT_CONSOLE
- bool "Support for console on virtual terminal" if EMBEDDED
+ bool "Support for console on virtual terminal" if EXPERT
depends on VT
default y
---help---
@@ -426,10 +426,10 @@ config SGI_MBCS
If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N.
-source "drivers/serial/Kconfig"
+source "drivers/tty/serial/Kconfig"
config UNIX98_PTYS
- bool "Unix98 PTY support" if EMBEDDED
+ bool "Unix98 PTY support" if EXPERT
default y
---help---
A pseudo terminal (PTY) is a software device consisting of two
@@ -495,7 +495,7 @@ config LEGACY_PTY_COUNT
config TTY_PRINTK
bool "TTY driver to output user messages via printk"
- depends on EMBEDDED
+ depends on EXPERT
default n
---help---
If you say Y here, the support for writing user messages (i.e.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 1e9dffb33778..5bc765d4c3ca 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,25 +30,12 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
-obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
-obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
-obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
-obj-$(CONFIG_HVC_TILE) += hvc_tile.o
-obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
-obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
-obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
-obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
-obj-$(CONFIG_HVC_XEN) += hvc_xen.o
-obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
-obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
-obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_VIOTAPE) += viotape.o
-obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_IBM_BSR) += bsr.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 1f46f1cd9225..36e0fa161c2b 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,12 +364,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED)
+ if (duration_idx != TPM_UNDEFINED) {
duration = chip->vendor.duration[duration_idx];
- if (duration <= 0)
+ /* if duration is 0, it's because chip->vendor.duration wasn't */
+ /* filled yet, so we set the lowest timeout just to give enough */
+ /* time for tpm_get_timeouts() to succeed */
+ return (duration <= 0 ? HZ : duration);
+ } else
return 2 * 60 * HZ;
- else
- return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
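The change above falls back to the shortest sensible timeout (HZ, one second's worth of jiffies) when the per-ordinal duration table has not been filled in yet, and keeps the two-minute default for unknown ordinals. A standalone sketch of that decision; TPM_UNDEFINED and HZ below are stand-in values, not the kernel's definitions.

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

#define TPM_UNDEFINED   255     /* stand-in for the kernel constant */
#define HZ              250     /* stand-in for jiffies per second */

static unsigned long ordinal_duration(unsigned int duration_idx,
                                      const unsigned long *duration_table)
{
        unsigned long duration;

        if (duration_idx == TPM_UNDEFINED)
                return 2 * 60 * HZ;             /* unknown ordinal */

        duration = duration_table[duration_idx];
        /* 0 means tpm_get_timeouts() has not filled the table yet */
        return duration ? duration : HZ;
}

int main(void)
{
        unsigned long table[3] = { 0, 0, 0 };   /* not filled in yet */

        printf("%lu\n", ordinal_duration(1, table));             /* HZ */
        table[1] = 10 * HZ;
        printf("%lu\n", ordinal_duration(1, table));             /* 10 * HZ */
        printf("%lu\n", ordinal_duration(TPM_UNDEFINED, table)); /* 2 * 60 * HZ */
        return 0;
}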
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c17a305ecb28..dd21df55689d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -493,9 +493,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
- if (is_itpm(to_pnp_dev(dev)))
- itpm = 1;
-
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
@@ -637,6 +634,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
else
interrupts = 0;
+ if (is_itpm(pnp_dev))
+ itpm = 1;
+
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index cfb0f5278415..effe7974aa9a 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -202,17 +202,21 @@ static int __init init_acpi_pm_clocksource(void)
printk(KERN_INFO "PM-Timer had inconsistent results:"
" 0x%#llx, 0x%#llx - aborting.\n",
value1, value2);
+ pmtmr_ioport = 0;
return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
printk(KERN_INFO "PM-Timer failed consistency check "
" (0x%#llx) - aborting.\n", value1);
+ pmtmr_ioport = 0;
return -ENODEV;
}
}
- if (verify_pmtmr_rate() != 0)
+ if (verify_pmtmr_rate() != 0){
+ pmtmr_ioport = 0;
return -ENODEV;
+ }
return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a8c8d9c19d74..ca8ee8093d6c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
config CPU_FREQ_DEFAULT_GOV_POWERSAVE
bool "powersave"
- depends on EMBEDDED
+ depends on EXPERT
select CPU_FREQ_GOV_POWERSAVE
help
Use the CPUFreq governor 'powersave' as default. This sets
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 68f942cb30f2..0c56989cd907 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -49,15 +49,13 @@ config FIREWIRE_SBP2
configuration section.
config FIREWIRE_NET
- tristate "IP networking over 1394 (EXPERIMENTAL)"
- depends on FIREWIRE && INET && EXPERIMENTAL
+ tristate "IP networking over 1394"
+ depends on FIREWIRE && INET
help
This enables IPv4 over IEEE 1394, providing IP connectivity with
other implementations of RFC 2734 as found on several operating
systems. Multicast support is currently limited.
- NOTE, this driver is not stable yet!
-
To compile this driver as a module, say M here: The module will be
called firewire-net.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be0492398ef9..24ff35511e2b 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_IRMC ((1) << 31)
#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
+#define CANON_OUI 0x000085
+
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
@@ -284,6 +286,7 @@ static void bm_work(struct work_struct *work)
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
+ bool keep_this_irm;
spin_lock_irq(&card->lock);
@@ -305,6 +308,10 @@ static void bm_work(struct work_struct *work)
irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
(irm_device->config_rom[2] & 0x000000f0) == 0;
+ /* Canon MV5i works unreliably if it is not root node. */
+ keep_this_irm = irm_device && irm_device->config_rom &&
+ irm_device->config_rom[3] >> 8 == CANON_OUI;
+
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
@@ -333,7 +340,7 @@ static void bm_work(struct work_struct *work)
goto pick_me;
}
- if (irm_is_1394_1995_only) {
+ if (irm_is_1394_1995_only && !keep_this_irm) {
new_root_id = local_id;
fw_notify("%s, making local node (%02x) root.\n",
"IRM is not 1394a compliant", new_root_id);
@@ -382,7 +389,7 @@ static void bm_work(struct work_struct *work)
spin_lock_irq(&card->lock);
- if (rcode != RCODE_COMPLETE) {
+ if (rcode != RCODE_COMPLETE && !keep_this_irm) {
/*
* The lock request failed, maybe the IRM
* isn't really IRM capable after all. Let's
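The keep_this_irm test works because the vendor OUI occupies the upper 24 bits of config_rom[3], the same quadlet that forms the high half of the node GUID (see the guid assembly in net.c below). A tiny sketch with an invented quadlet value:

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define CANON_OUI 0x000085

int main(void)
{
        /* invented value: top 24 bits carry the vendor OUI */
        unsigned int config_rom_3 = 0x00008542;
        unsigned int oui = config_rom_3 >> 8;

        printf("OUI %06x -> %s\n", oui, oui == CANON_OUI ? "Canon" : "other");
        return 0;
}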
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index c2e194c58667..7ed08fd1214e 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -191,6 +191,7 @@ struct fwnet_peer {
struct fwnet_device *dev;
u64 guid;
u64 fifo;
+ __be32 ip;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
@@ -570,6 +571,8 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
peer->speed = sspd;
if (peer->max_payload > max_payload)
peer->max_payload = max_payload;
+
+ peer->ip = arp1394->sip;
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -1470,6 +1473,7 @@ static int fwnet_add_peer(struct fwnet_device *dev,
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
peer->fifo = FWNET_NO_FIFO_ADDR;
+ peer->ip = 0;
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
@@ -1589,10 +1593,13 @@ static int fwnet_remove(struct device *_dev)
mutex_lock(&fwnet_device_mutex);
+ net = dev->netdev;
+ if (net && peer->ip)
+ arp_invalidate(net, peer->ip);
+
fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
- net = dev->netdev;
unregister_netdev(net);
if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e8b6a13515bd..e710424b59ea 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -27,7 +27,7 @@ config EDD_OFF
using the kernel parameter 'edd={on|skipmbr|off}'.
config FIRMWARE_MEMMAP
- bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+ bool "Add firmware-provided memory map to sysfs" if EXPERT
default X86
help
Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 64828a7db77b..bea966f8ac84 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@ config DRM_KMS_HELPER
tristate
depends on DRM
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
help
FB and CRTC helpers for KMS drivers.
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5c4f9b9ecdc0..6977a1ce9d98 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1533,11 +1533,11 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
static int __init drm_fb_helper_modinit(void)
{
const char *name = "fbcon";
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03e337072517..f6b9baa6a63d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -928,6 +928,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
+ int reread = 0;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
@@ -940,9 +941,8 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
* fallback to the slow and accurate path.
*/
head = intel_read_status_page(ring, 4);
- if (head < ring->actual_head)
+ if (reread)
head = I915_READ_HEAD(ring);
- ring->actual_head = head;
ring->head = head & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
@@ -961,6 +961,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
msleep(1);
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
+ reread = 1;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index be9087e4c9be..5b0abfa881fc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -47,7 +47,6 @@ struct intel_ring_buffer {
struct drm_device *dev;
struct drm_i915_gem_object *obj;
- u32 actual_head;
u32 head;
u32 tail;
int space;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 21d6c29c2d21..de70959b9ed5 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,7 +8,7 @@ config DRM_NOUVEAU
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
help
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a22027..96c83a9a76bb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
config VGA_ARB
- bool "VGA Arbitration" if EMBEDDED
+ bool "VGA Arbitration" if EXPERT
default y
depends on PCI
help
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 24cca2f69dfc..2560f01c1a63 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -62,9 +62,9 @@ config HID_3M_PCT
Support for 3M PCT touch screens.
config HID_A4TECH
- tristate "A4 tech mice" if EMBEDDED
+ tristate "A4 tech mice" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for A4 tech X5 and WOP-35 / Trust 450L mice.
@@ -77,9 +77,9 @@ config HID_ACRUX_FF
game controllers.
config HID_APPLE
- tristate "Apple {i,Power,Mac}Books" if EMBEDDED
+ tristate "Apple {i,Power,Mac}Books" if EXPERT
depends on (USB_HID || BT_HIDP)
- default !EMBEDDED
+ default !EXPERT
---help---
Support for some Apple devices which less or more break
HID specification.
@@ -88,9 +88,9 @@ config HID_APPLE
MacBooks, MacBook Pros and Apple Aluminum.
config HID_BELKIN
- tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED
+ tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Belkin Flip KVM and Wireless keyboard.
@@ -101,16 +101,16 @@ config HID_CANDO
Support for Cando dual touch panel.
config HID_CHERRY
- tristate "Cherry Cymotion keyboard" if EMBEDDED
+ tristate "Cherry Cymotion keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Cherry Cymotion keyboard.
config HID_CHICONY
- tristate "Chicony Tactical pad" if EMBEDDED
+ tristate "Chicony Tactical pad" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Chicony Tactical pad.
@@ -130,9 +130,9 @@ config HID_PRODIKEYS
and some additional multimedia keys.
config HID_CYPRESS
- tristate "Cypress mouse and barcode readers" if EMBEDDED
+ tristate "Cypress mouse and barcode readers" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for cypress mouse and barcode readers.
@@ -174,16 +174,16 @@ config HID_ELECOM
Support for the ELECOM BM084 (bluetooth mouse).
config HID_EZKEY
- tristate "Ezkey BTC 8193 keyboard" if EMBEDDED
+ tristate "Ezkey BTC 8193 keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Ezkey BTC 8193 keyboard.
config HID_KYE
- tristate "Kye/Genius Ergo Mouse" if EMBEDDED
+ tristate "Kye/Genius Ergo Mouse" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Kye/Genius Ergo Mouse.
@@ -212,16 +212,16 @@ config HID_TWINHAN
Support for Twinhan IR remote control.
config HID_KENSINGTON
- tristate "Kensington Slimblade Trackball" if EMBEDDED
+ tristate "Kensington Slimblade Trackball" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Kensington Slimblade Trackball.
config HID_LOGITECH
- tristate "Logitech devices" if EMBEDDED
+ tristate "Logitech devices" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Logitech devices that are not fully compliant with HID standard.
@@ -276,9 +276,9 @@ config HID_MAGICMOUSE
Apple Wireless "Magic" Mouse.
config HID_MICROSOFT
- tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED
+ tristate "Microsoft non-fully HID-compliant devices" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
@@ -289,9 +289,9 @@ config HID_MOSART
Support for MosArt dual-touch panels.
config HID_MONTEREY
- tristate "Monterey Genius KB29E keyboard" if EMBEDDED
+ tristate "Monterey Genius KB29E keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Monterey Genius KB29E.
@@ -365,8 +365,8 @@ config HID_PICOLCD
- IR
config HID_PICOLCD_FB
- bool "Framebuffer support" if EMBEDDED
- default !EMBEDDED
+ bool "Framebuffer support" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=FB || FB=y
select FB_DEFERRED_IO
@@ -379,8 +379,8 @@ config HID_PICOLCD_FB
frambuffer device.
config HID_PICOLCD_BACKLIGHT
- bool "Backlight control" if EMBEDDED
- default !EMBEDDED
+ bool "Backlight control" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
---help---
@@ -388,16 +388,16 @@ config HID_PICOLCD_BACKLIGHT
class.
config HID_PICOLCD_LCD
- bool "Contrast control" if EMBEDDED
- default !EMBEDDED
+ bool "Contrast control" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
---help---
Provide access to PicoLCD's LCD contrast via lcd class.
config HID_PICOLCD_LEDS
- bool "GPO via leds class" if EMBEDDED
- default !EMBEDDED
+ bool "GPO via leds class" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
---help---
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 4edb3bef94a6..0f20fd17cf06 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -45,7 +45,7 @@ config USB_HIDDEV
If unsure, say Y.
menu "USB HID Boot Protocol drivers"
- depends on USB!=n && USB_HID!=y && EMBEDDED
+ depends on USB!=n && USB_HID!=y && EXPERT
config USB_KBD
tristate "USB HIDBP Keyboard (simple Boot) support"
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 98ccfeb3f5aa..9827c5e686cb 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -134,7 +134,7 @@ config BLK_DEV_IDECD
module will be called ide-cd.
config BLK_DEV_IDECD_VERBOSE_ERRORS
- bool "Verbose error logging for IDE/ATAPI CDROM driver" if EMBEDDED
+ bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
depends on BLK_DEV_IDECD
default y
help
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 7acb32e7f817..1fa091e05690 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -263,7 +263,7 @@ static void __setup_broadcast_timer(void *arg)
clockevents_notify(reason, &cpu);
}
-static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
@@ -273,15 +273,11 @@ static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
smp_call_function_single(hotcpu, __setup_broadcast_timer,
(void *)true, 1);
break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(hotcpu, __setup_broadcast_timer,
- (void *)false, 1);
- break;
}
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+static struct notifier_block setup_broadcast_notifier = {
.notifier_call = setup_broadcast_cpuhp_notify,
};
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 03efc074967e..da314c3fec23 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,7 +7,7 @@ config INFINIBAND_MTHCA
("Tavor") and the MT25208 PCI Express HCA ("Arbel").
config INFINIBAND_MTHCA_DEBUG
- bool "Verbose debugging output" if EMBEDDED
+ bool "Verbose debugging output" if EXPERT
depends on INFINIBAND_MTHCA
default y
---help---
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 55855eeabae7..cda8eac55fff 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -24,7 +24,7 @@ config INFINIBAND_IPOIB_CM
unless you limit mtu for these destinations to 2044.
config INFINIBAND_IPOIB_DEBUG
- bool "IP-over-InfiniBand debugging" if EMBEDDED
+ bool "IP-over-InfiniBand debugging" if EXPERT
depends on INFINIBAND_IPOIB
default y
---help---
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 07c2cd43109c..1903c0f5b925 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -6,7 +6,7 @@ menu "Input device support"
depends on !S390
config INPUT
- tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
+ tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
default y
help
Say Y here if you have any input device (mouse, keyboard, tablet,
@@ -67,7 +67,7 @@ config INPUT_SPARSEKMAP
comment "Userland interfaces"
config INPUT_MOUSEDEV
- tristate "Mouse interface" if EMBEDDED
+ tristate "Mouse interface" if EXPERT
default y
help
Say Y here if you want your mouse to be accessible as char devices
@@ -150,7 +150,7 @@ config INPUT_EVBUG
module will be called evbug.
config INPUT_APMPOWER
- tristate "Input Power Event -> APM Bridge" if EMBEDDED
+ tristate "Input Power Event -> APM Bridge" if EXPERT
depends on INPUT && APM_EMULATION
help
Say Y here if you want suspend key events to trigger a user
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7b3c0b8fa432..417507348bab 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
menuconfig INPUT_KEYBOARD
- bool "Keyboards" if EMBEDDED || !X86
+ bool "Keyboards" if EXPERT || !X86
default y
help
Say Y here, and a list of supported keyboards will be displayed.
@@ -57,7 +57,7 @@ config KEYBOARD_ATARI
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EMBEDDED || !X86
+ tristate "AT keyboard" if EXPERT || !X86
default y
select SERIO
select SERIO_LIBPS2
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index bf5fd7f6a313..9c1e6ee83531 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -39,7 +39,7 @@ config MOUSE_PS2
module will be called psmouse.
config MOUSE_PS2_ALPS
- bool "ALPS PS/2 mouse protocol extension" if EMBEDDED
+ bool "ALPS PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -49,7 +49,7 @@ config MOUSE_PS2_ALPS
If unsure, say Y.
config MOUSE_PS2_LOGIPS2PP
- bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED
+ bool "Logitech PS/2++ mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -59,7 +59,7 @@ config MOUSE_PS2_LOGIPS2PP
If unsure, say Y.
config MOUSE_PS2_SYNAPTICS
- bool "Synaptics PS/2 mouse protocol extension" if EMBEDDED
+ bool "Synaptics PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -69,7 +69,7 @@ config MOUSE_PS2_SYNAPTICS
If unsure, say Y.
config MOUSE_PS2_LIFEBOOK
- bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
+ bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2 && X86 && DMI
help
@@ -79,7 +79,7 @@ config MOUSE_PS2_LIFEBOOK
If unsure, say Y.
config MOUSE_PS2_TRACKPOINT
- bool "IBM Trackpoint PS/2 mouse protocol extension" if EMBEDDED
+ bool "IBM Trackpoint PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 307eef77a172..55f2c2293ec6 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
config SERIO
- tristate "Serial I/O support" if EMBEDDED || !X86
+ tristate "Serial I/O support" if EXPERT || !X86
default y
help
Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@ config SERIO
if SERIO
config SERIO_I8042
- tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
+ tristate "i8042 PC Keyboard controller" if EXPERT || !X86
default y
depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
(!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@ config SERIO_MACEPS2
module will be called maceps2.
config SERIO_LIBPS2
- tristate "PS/2 driver library" if EMBEDDED
+ tristate "PS/2 driver library" if EXPERT
depends on SERIO_I8042 || SERIO_I8042=n
help
Say Y here if you are using a driver for device connected
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0c9f4b158ff0..61834ae282e1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -540,62 +540,62 @@ config TOUCHSCREEN_MC13783
config TOUCHSCREEN_USB_EGALAX
default y
- bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
+ bool "eGalax, eTurboTouch CT-410/510/700 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_PANJIT
default y
- bool "PanJit device support" if EMBEDDED
+ bool "PanJit device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_3M
default y
- bool "3M/Microtouch EX II series device support" if EMBEDDED
+ bool "3M/Microtouch EX II series device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ITM
default y
- bool "ITM device support" if EMBEDDED
+ bool "ITM device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ETURBO
default y
- bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED
+ bool "eTurboTouch (non-eGalax compatible) device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GUNZE
default y
- bool "Gunze AHL61 device support" if EMBEDDED
+ bool "Gunze AHL61 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_DMC_TSC10
default y
- bool "DMC TSC-10/25 device support" if EMBEDDED
+ bool "DMC TSC-10/25 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_IRTOUCH
default y
- bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED
+ bool "IRTOUCHSYSTEMS/UNITOP device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_IDEALTEK
default y
- bool "IdealTEK URTC1000 device support" if EMBEDDED
+ bool "IdealTEK URTC1000 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GENERAL_TOUCH
default y
- bool "GeneralTouch Touchscreen device support" if EMBEDDED
+ bool "GeneralTouch Touchscreen device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GOTOP
default y
- bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
+ bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_JASTEC
default y
- bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED
+ bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_E2I
@@ -605,17 +605,17 @@ config TOUCHSCREEN_USB_E2I
config TOUCHSCREEN_USB_ZYTRONIC
default y
- bool "Zytronic controller" if EMBEDDED
+ bool "Zytronic controller" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ETT_TC45USB
default y
- bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
+ bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_NEXIO
default y
- bool "NEXIO/iNexio device support" if EMBEDDED
+ bool "NEXIO/iNexio device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 991d93be0f44..ecc4bf3f37a9 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -99,7 +99,7 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
- return sprintf(buf, "%s\n", gpio_data->inverted ? "yes" : "no");
+ return sprintf(buf, "%u\n", gpio_data->inverted);
}
static ssize_t gpio_trig_inverted_store(struct device *dev,
@@ -107,16 +107,17 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
- unsigned inverted;
+ unsigned long inverted;
int ret;
- ret = sscanf(buf, "%u", &inverted);
- if (ret < 1) {
- dev_err(dev, "invalid value\n");
+ ret = strict_strtoul(buf, 10, &inverted);
+ if (ret < 0)
+ return ret;
+
+ if (inverted > 1)
return -EINVAL;
- }
- gpio_data->inverted = !!inverted;
+ gpio_data->inverted = inverted;
/* After inverting, we need to update the LED. */
schedule_work(&gpio_data->work);
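The trigger now parses the value with strict_strtoul() (the pre-kstrtoul kernel helper) and rejects anything other than 0 or 1, so what is read back from the attribute is exactly what was written. A user-space sketch of the same parse-and-validate pattern, using strtoul() since the kernel helpers do not exist there:

/* Illustrative sketch, not driver code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_inverted(const char *buf, unsigned long *out)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(buf, &end, 10);
        if (errno || end == buf)
                return -EINVAL;
        if (val > 1)                    /* only "0" and "1" are meaningful */
                return -EINVAL;

        *out = val;
        return 0;
}

int main(void)
{
        unsigned long inverted;

        printf("\"1\" -> %d\n", parse_inverted("1", &inverted));
        printf("\"7\" -> %d\n", parse_inverted("7", &inverted));
        return 0;
}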
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 04b22128a474..d21578ee95de 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1137,7 +1137,7 @@ void free_guest_pagetable(struct lguest *lg)
*/
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
- pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+ pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
pte_t regs_pte;
#ifdef CONFIG_X86_PAE
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index b4eb675a807e..9f1659c3d1f3 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
- if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
- __get_cpu_var(lg_last_cpu) = cpu;
+ if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __this_cpu_write(lg_last_cpu, cpu);
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 2e041fd0a00c..f3a29f264db9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -443,7 +443,7 @@ static int fan_read_reg(int reg, unsigned char *buf, int nb)
tries = 0;
for (;;) {
nr = i2c_master_recv(fcu, buf, nb);
- if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100)
+ if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
break;
msleep(10);
++tries;
@@ -464,7 +464,7 @@ static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
tries = 0;
for (;;) {
nw = i2c_master_send(fcu, buf, nb);
- if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100)
+ if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
break;
msleep(10);
++tries;
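The sign fix matters because i2c_master_send()/i2c_master_recv() return negative errno values; comparing against the positive ENODEV/EIO constants meant the retry loop gave up on the first such error instead of retrying it. A user-space illustration of the comparison:

/* Illustrative sketch, not driver code. */
#include <errno.h>
#include <stdio.h>

int main(void)
{
        int nr = -ENODEV;       /* what the I2C core returns on failure */

        printf("old:   nr != ENODEV  -> %d (always true, so -ENODEV is never retried)\n",
               nr != ENODEV);
        printf("fixed: nr != -ENODEV -> %d (false for -ENODEV, so the loop retries)\n",
               nr != -ENODEV);
        return 0;
}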
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 982f000a57ff..9f47e383c57a 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -452,7 +452,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
dev->ext = ext;
- mutex_init(&dev->lock);
+ mutex_init(&dev->v4l2_lock);
spin_lock_init(&dev->int_slock);
spin_lock_init(&dev->slock);
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index e3fedc60fe77..1bd3dd762c6b 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -15,18 +15,15 @@ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
}
/* is it free? */
- mutex_lock(&dev->lock);
if (vv->resources & bit) {
DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
/* no, someone else uses it */
- mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
vv->resources |= bit;
DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
- mutex_unlock(&dev->lock);
return 1;
}
@@ -37,11 +34,9 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
BUG_ON((fh->resources & bits) != bits);
- mutex_lock(&dev->lock);
fh->resources &= ~bits;
vv->resources &= ~bits;
DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
- mutex_unlock(&dev->lock);
}
@@ -396,7 +391,7 @@ static const struct v4l2_file_operations video_fops =
.write = fops_write,
.poll = fops_poll,
.mmap = fops_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static void vv_callback(struct saa7146_dev *dev, unsigned long status)
@@ -505,6 +500,7 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
vfd->fops = &video_fops;
vfd->ioctl_ops = &dev->ext_vv_data->ops;
vfd->release = video_device_release;
+ vfd->lock = &dev->v4l2_lock;
vfd->tvnorms = 0;
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 2d4533ab22b7..afe85801d6ca 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
sizeof(struct saa7146_buf),
- file, NULL);
+ file, &dev->v4l2_lock);
init_timer(&fh->vbi_read_timeout);
fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 0ac5c619aecf..9aafa4e969a8 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -553,8 +553,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
}
}
- mutex_lock(&dev->lock);
-
/* ok, accept it */
vv->ov_fb = *fb;
vv->ov_fmt = fmt;
@@ -563,8 +561,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
}
-
- mutex_unlock(&dev->lock);
return 0;
}
@@ -649,8 +645,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
return -EINVAL;
}
- mutex_lock(&dev->lock);
-
switch (ctrl->type) {
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
@@ -693,7 +687,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
/* fixme: we can support changing VFLIP and HFLIP here... */
if (IS_CAPTURE_ACTIVE(fh) != 0) {
DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
- mutex_unlock(&dev->lock);
return -EBUSY;
}
vv->hflip = c->value;
@@ -701,16 +694,13 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_VFLIP:
if (IS_CAPTURE_ACTIVE(fh) != 0) {
DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
- mutex_unlock(&dev->lock);
return -EBUSY;
}
vv->vflip = c->value;
break;
default:
- mutex_unlock(&dev->lock);
return -EINVAL;
}
- mutex_unlock(&dev->lock);
if (IS_OVERLAY_ACTIVE(fh) != 0) {
saa7146_stop_preview(fh);
@@ -902,22 +892,18 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
err = vidioc_try_fmt_vid_overlay(file, fh, f);
if (0 != err)
return err;
- mutex_lock(&dev->lock);
fh->ov.win = f->fmt.win;
fh->ov.nclips = f->fmt.win.clipcount;
if (fh->ov.nclips > 16)
fh->ov.nclips = 16;
if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
sizeof(struct v4l2_clip) * fh->ov.nclips)) {
- mutex_unlock(&dev->lock);
return -EFAULT;
}
/* fh->ov.fh is used to indicate that we have valid overlay informations, too */
fh->ov.fh = fh;
- mutex_unlock(&dev->lock);
-
/* check if our current overlay is active */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
saa7146_stop_preview(fh);
@@ -976,8 +962,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
}
}
- mutex_lock(&dev->lock);
-
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
if (*id & dev->ext_vv_data->stds[i].id)
break;
@@ -988,8 +972,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
found = 1;
}
- mutex_unlock(&dev->lock);
-
if (vv->ov_suspend != NULL) {
saa7146_start_preview(vv->ov_suspend);
vv->ov_suspend = NULL;
@@ -1354,7 +1336,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7146_buf),
- file, NULL);
+ file, &dev->v4l2_lock);
return 0;
}
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 78b089526e02..6fc79f15dcbc 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@ config MEDIA_TUNER
config MEDIA_TUNER_CUSTOMISE
bool "Customize analog and hybrid tuner modules to build"
depends on MEDIA_TUNER
- default y if EMBEDDED
+ default y if EXPERT
help
This allows the user to deselect tuner drivers unnecessary
for their hardware from the build. Use this option with care
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c9062ceddc71..bc6a67768af1 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -95,8 +95,7 @@ static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close)
msleep(20);
} else {
msg = disable;
- tuner_i2c_xfer_send(&priv->i2c_props, msg, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &msg[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
buf[2] = msg[1];
buf[2] &= ~0x04;
@@ -233,19 +232,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
}
+
tda8290_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
for (i = 0; i < 3; i++) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1, &pll_stat, 1);
if (pll_stat & 0x80) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_adc_sat, 1,
+ &adc_sat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1,
+ &agc_stat, 1);
tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
break;
} else {
@@ -259,20 +261,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
agc_stat, adc_sat, pll_stat & 0x80);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
msleep(100);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1, &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1, &pll_stat, 1);
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
agc_stat, pll_stat & 0x80);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
msleep(100);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1,
+ &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1,
+ &pll_stat, 1);
if((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
@@ -284,10 +288,12 @@ static void tda8290_set_params(struct dvb_frontend *fe,
/* l/ l' deadlock? */
if(priv->tda8290_easy_mode & 0x60) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_adc_sat, 1,
+ &adc_sat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1,
+ &pll_stat, 1);
if ((adc_sat > 20) || !(pll_stat & 0x80)) {
tuner_dbg("trying to resolve SECAM L deadlock\n");
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
@@ -307,8 +313,7 @@ static void tda8295_power(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01;
@@ -323,8 +328,7 @@ static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x01, 0x00 };
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
@@ -353,8 +357,7 @@ static void tda8295_agc1_out(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] &= ~0x40;
@@ -370,10 +373,10 @@ static void tda8295_agc2_out(struct dvb_frontend *fe, int enable)
unsigned char set_gpio_cf[] = { 0x44, 0x00 };
unsigned char set_gpio_val[] = { 0x46, 0x00 };
- tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_cf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_cf[1], 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_val[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_val[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &set_gpio_val[0], 1, &set_gpio_val[1], 1);
set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
@@ -392,8 +395,7 @@ static int tda8295_has_signal(struct dvb_frontend *fe)
unsigned char hvpll_stat = 0x26;
unsigned char ret;
- tuner_i2c_xfer_send(&priv->i2c_props, &hvpll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &ret, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
return (ret & 0x01) ? 65535 : 0;
}
@@ -413,8 +415,8 @@ static void tda8295_set_params(struct dvb_frontend *fe,
tda8295_power(fe, 1);
tda8295_agc1_out(fe, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &blanking_mode[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &blanking_mode[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &blanking_mode[0], 1, &blanking_mode[1], 1);
tda8295_set_video_std(fe);
@@ -447,8 +449,8 @@ static int tda8290_has_signal(struct dvb_frontend *fe)
unsigned char i2c_get_afc[1] = { 0x1B };
unsigned char afc = 0;
- tuner_i2c_xfer_send(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc));
- tuner_i2c_xfer_recv(&priv->i2c_props, &afc, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
return (afc & 0x80)? 65535:0;
}
@@ -654,20 +656,26 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
static int tda8290_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8290_ID 0x89
- unsigned char tda8290_id[] = { 0x1f, 0x00 };
+ u8 reg = 0x1f, id;
+ struct i2c_msg msg_read[] = {
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ };
/* detect tda8290 */
- tuner_i2c_xfer_send(i2c_props, &tda8290_id[0], 1);
- tuner_i2c_xfer_recv(i2c_props, &tda8290_id[1], 1);
+ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ __func__, reg);
+ return -ENODEV;
+ }
- if (tda8290_id[1] == TDA8290_ID) {
+ if (id == TDA8290_ID) {
if (debug)
printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
__func__, i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
-
return -ENODEV;
}
@@ -675,16 +683,23 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8295_ID 0x8a
#define TDA8295C2_ID 0x8b
- unsigned char tda8295_id[] = { 0x2f, 0x00 };
+ u8 reg = 0x2f, id;
+ struct i2c_msg msg_read[] = {
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ };
- /* detect tda8295 */
- tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
- tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
+ /* detect tda8290 */
+ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ __func__, reg);
+ return -ENODEV;
+ }
- if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
+ if ((id & 0xfe) == TDA8295_ID) {
if (debug)
printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
- __func__, (tda8295_id[1] == TDA8295_ID) ?
+ __func__, (id == TDA8295_ID) ?
"tda8295c1" : "tda8295c2",
i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
@@ -740,9 +755,11 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
sizeof(struct analog_demod_ops));
}
- if ((!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) &&
- (tda829x_find_tuner(fe) < 0))
- goto fail;
+ if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
+ tda8295_power(fe, 1);
+ if (tda829x_find_tuner(fe) < 0)
+ goto fail;
+ }
switch (priv->ver) {
case TDA8290:
@@ -786,6 +803,8 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
return fe;
fail:
+ memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
+
tda829x_release(fe);
return NULL;
}
@@ -809,8 +828,8 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
int i;
/* rule out tda9887, which would return the same byte repeatedly */
- tuner_i2c_xfer_send(&i2c_props, soft_reset, 1);
- tuner_i2c_xfer_recv(&i2c_props, buf, PROBE_BUFFER_SIZE);
+ tuner_i2c_xfer_send_recv(&i2c_props,
+ soft_reset, 1, buf, PROBE_BUFFER_SIZE);
for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
if (buf[i] != buf[0])
break;
@@ -827,13 +846,12 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
/* fall back to old probing method */
tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
- tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
- tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+ tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
if (data == 0) {
tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
- tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
- tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+ tuner_i2c_xfer_send_recv(&i2c_props,
+ &addr_dto_lsb, 1, &data, 1);
if (data == 0x7b) {
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 8ca48f76dfa9..98ffb40728e3 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -514,8 +514,8 @@ struct dib0700_rc_response {
union {
u16 system16;
struct {
- u8 system;
u8 not_system;
+ u8 system;
};
};
u8 data;
@@ -575,7 +575,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
deb_data("NEC extended protocol\n");
/* NEC extended code - 24 bits */
- keycode = poll_reply->system16 << 8 | poll_reply->data;
+ keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data;
} else {
deb_data("NEC normal protocol\n");
/* normal NEC code - 16 bits */
@@ -587,7 +587,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
deb_data("RC5 protocol\n");
/* RC5 Protocol */
toggle = poll_reply->report_id;
- keycode = poll_reply->system16 << 8 | poll_reply->data;
+ keycode = poll_reply->system << 8 | poll_reply->data;
break;
}
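The remote-control fix relies on the NEC convention that, in the plain protocol, the second address byte is the bitwise complement of the first; when their XOR is not 0xff, the two bytes form a 16-bit extended address instead. A small sketch of that classification, with invented byte values:

/* Illustrative sketch, not driver code. */
#include <stdio.h>
#include <stdint.h>

static void classify(uint8_t system, uint8_t not_system)
{
        if ((system ^ not_system) != 0xff)
                printf("%02x %02x: NEC extended (16-bit address)\n",
                       system, not_system);
        else
                printf("%02x %02x: plain NEC (8-bit address)\n",
                       system, not_system);
}

int main(void)
{
        classify(0x34, 0xcb);   /* 0xcb == ~0x34 -> plain NEC */
        classify(0x34, 0x12);   /* not a complement -> NEC extended */
        return 0;
}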
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index fcf3828472b8..f82d4a93feb3 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -172,7 +172,8 @@ void fdtv_unregister_rc(struct firedtv *fdtv)
void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
{
- u16 *keycode = fdtv->remote_ctrl_dev->keycode;
+ struct input_dev *idev = fdtv->remote_ctrl_dev;
+ u16 *keycode = idev->keycode;
if (code >= 0x0300 && code <= 0x031f)
code = keycode[code - 0x0300];
@@ -188,6 +189,8 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
return;
}
- input_report_key(fdtv->remote_ctrl_dev, code, 1);
- input_report_key(fdtv->remote_ctrl_dev, code, 0);
+ input_report_key(idev, code, 1);
+ input_sync(idev);
+ input_report_key(idev, code, 0);
+ input_sync(idev);
}
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index ef3e43a03199..b8519ba511e5 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
config DVB_FE_CUSTOMISE
bool "Customise the frontend modules to build"
depends on DVB_CORE
- default y if EMBEDDED
+ default y if EXPERT
help
This allows the user to select/deselect frontend drivers for their
hardware from the build.
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index ce222055526d..ba25fa0b0fc2 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -334,11 +334,11 @@ static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
if_sample_freq = 3300000; /* 3.3 MHz */
break;
case BANDWIDTH_7_MHZ:
- if_sample_freq = 3800000; /* 3.8 MHz */
+ if_sample_freq = 3500000; /* 3.5 MHz */
break;
case BANDWIDTH_8_MHZ:
default:
- if_sample_freq = 4300000; /* 4.3 MHz */
+ if_sample_freq = 4000000; /* 4.0 MHz */
break;
}
} else if (state->config.tuner == AF9013_TUNER_TDA18218) {
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 6360c681ded9..6c2e929bd79f 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -311,7 +311,7 @@ struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
return fe;
error:
- ix2505v_release(fe);
+ kfree(state);
return NULL;
}
EXPORT_SYMBOL(ix2505v_attach);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index d3ad3e75a35a..cc4acd2f920d 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -43,6 +43,8 @@ struct mb86a20s_state {
const struct mb86a20s_config *config;
struct dvb_frontend frontend;
+
+ bool need_init;
};
struct regdata {
@@ -318,7 +320,7 @@ static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
+ printk("%s: writereg error (rc == %i, reg == 0x%02x,"
" data == 0x%02x)\n", __func__, rc, reg, data);
return rc;
}
@@ -353,7 +355,7 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, msg, 2);
if (rc != 2) {
- rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc);
+ rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
return rc;
}
@@ -382,23 +384,31 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
/* Initialize the frontend */
rc = mb86a20s_writeregdata(state, mb86a20s_init);
if (rc < 0)
- return rc;
+ goto err;
if (!state->config->is_serial) {
regD5 &= ~1;
rc = mb86a20s_writereg(state, 0x50, 0xd5);
if (rc < 0)
- return rc;
+ goto err;
rc = mb86a20s_writereg(state, 0x51, regD5);
if (rc < 0)
- return rc;
+ goto err;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- return 0;
+err:
+ if (rc < 0) {
+ state->need_init = true;
+ printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
+ } else {
+ state->need_init = false;
+ dprintk("Initialization succeded.\n");
+ }
+ return rc;
}
static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
@@ -485,8 +495,22 @@ static int mb86a20s_set_frontend(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
+ dprintk("Calling tuner set parameters\n");
fe->ops.tuner_ops.set_params(fe, p);
+ /*
+ * Make it more reliable: if, for some reason, the initial
+ * device initialization doesn't happen, initialize it when
+ * the SBTVD parameters are adjusted.
+ *
+ * Unfortunately, due to a hard to track bug at tda829x/tda18271,
+ * the agc callback logic is not called during DVB attach time,
+ * causing mb86a20s to not be initialized with Kworld SBTVD.
+ * So, this hack is needed, in order to make Kworld SBTVD to work.
+ */
+ if (state->need_init)
+ mb86a20s_initfe(fe);
+
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
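The comment block above explains the lazy re-initialization: a failed init is remembered in need_init and retried from the next set_frontend() call. A condensed sketch of that pattern; the struct and function names below are illustrative, not the driver's.

/* Illustrative sketch, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct demod {
        bool need_init;
};

static int demod_init(struct demod *d, bool hw_ready)
{
        if (!hw_ready) {
                d->need_init = true;    /* try again later */
                return -1;
        }
        d->need_init = false;
        return 0;
}

static void demod_set_frontend(struct demod *d, bool hw_ready)
{
        if (d->need_init)
                demod_init(d, hw_ready);
        printf("tune (need_init=%d)\n", d->need_init);
}

int main(void)
{
        struct demod d = { .need_init = false };

        demod_init(&d, false);          /* first init fails */
        demod_set_frontend(&d, true);   /* retried and succeeds here */
        return 0;
}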
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 122c72806916..9fc1dd0ba4c3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num > 1)
+ if (info->num < 0 || info->num > 1)
return -EINVAL;
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
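The added lower bound matters because info->num arrives from user space as a signed int, so a negative value would index before ci_slot[]. A minimal sketch of validating such an index (-22 stands in for -EINVAL):

/* Illustrative sketch, not driver code. */
#include <stdio.h>

#define NUM_SLOTS 2

static int check_slot(int num)
{
        if (num < 0 || num >= NUM_SLOTS)
                return -22;     /* -EINVAL */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", check_slot(-1), check_slot(0), check_slot(2));
        return 0;
}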
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 3c5a4739ed70..ecdffa6aac66 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,20 +151,6 @@ config RADIO_GEMTEK_PROBE
following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
0x28c.
-config RADIO_GEMTEK_PCI
- tristate "GemTek PCI Radio Card support"
- depends on VIDEO_V4L2 && PCI
- ---help---
- Choose Y here if you have this PCI FM radio card.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video for Linux API. Information on
- this API and pointers to "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-gemtek-pci.
-
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
depends on VIDEO_V4L2 && PCI
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index d2970748a69f..717656d2f749 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
-obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 6cc5d130fbc8..4ce10dbeadd8 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,6 +31,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
+#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
deleted file mode 100644
index 28fa85ba2087..000000000000
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- ***************************************************************************
- *
- * radio-gemtek-pci.c - Gemtek PCI Radio driver
- * (C) 2001 Vladimir Shebordaev <vshebordaev@mail.ru>
- *
- ***************************************************************************
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- ***************************************************************************
- *
- * Gemtek Corp still silently refuses to release any specifications
- * of their multimedia devices, so the protocol still has to be
- * reverse engineered.
- *
- * The v4l code was inspired by Jonas Munsin's Gemtek serial line
- * radio device driver.
- *
- * Please, let me know if this piece of code was useful :)
- *
- * TODO: multiple device support and portability were not tested
- *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
- *
- ***************************************************************************
- */
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/videodev2.h>
-#include <linux/errno.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-MODULE_AUTHOR("Vladimir Shebordaev <vshebordaev@mail.ru>");
-MODULE_DESCRIPTION("The video4linux driver for the Gemtek PCI Radio Card");
-MODULE_LICENSE("GPL");
-
-static int nr_radio = -1;
-static int mx = 1;
-
-module_param(mx, bool, 0);
-MODULE_PARM_DESC(mx, "single digit: 1 - turn off the turner upon module exit (default), 0 - do not");
-module_param(nr_radio, int, 0);
-MODULE_PARM_DESC(nr_radio, "video4linux device number to use");
-
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
-#ifndef PCI_VENDOR_ID_GEMTEK
-#define PCI_VENDOR_ID_GEMTEK 0x5046
-#endif
-
-#ifndef PCI_DEVICE_ID_GEMTEK_PR103
-#define PCI_DEVICE_ID_GEMTEK_PR103 0x1001
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_LOW
-#define GEMTEK_PCI_RANGE_LOW (87*16000)
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_HIGH
-#define GEMTEK_PCI_RANGE_HIGH (108*16000)
-#endif
-
-struct gemtek_pci {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex lock;
- struct pci_dev *pdev;
-
- u32 iobase;
- u32 length;
-
- u32 current_frequency;
- u8 mute;
-};
-
-static inline struct gemtek_pci *to_gemtek_pci(struct v4l2_device *v4l2_dev)
-{
- return container_of(v4l2_dev, struct gemtek_pci, v4l2_dev);
-}
-
-static inline u8 gemtek_pci_out(u16 value, u32 port)
-{
- outw(value, port);
-
- return (u8)value;
-}
-
-#define _b0(v) (*((u8 *)&v))
-
-static void __gemtek_pci_cmd(u16 value, u32 port, u8 *last_byte, int keep)
-{
- u8 byte = *last_byte;
-
- if (!value) {
- if (!keep)
- value = (u16)port;
- byte &= 0xfd;
- } else
- byte |= 2;
-
- _b0(value) = byte;
- outw(value, port);
- byte |= 1;
- _b0(value) = byte;
- outw(value, port);
- byte &= 0xfe;
- _b0(value) = byte;
- outw(value, port);
-
- *last_byte = byte;
-}
-
-static inline void gemtek_pci_nil(u32 port, u8 *last_byte)
-{
- __gemtek_pci_cmd(0x00, port, last_byte, false);
-}
-
-static inline void gemtek_pci_cmd(u16 cmd, u32 port, u8 *last_byte)
-{
- __gemtek_pci_cmd(cmd, port, last_byte, true);
-}
-
-static void gemtek_pci_setfrequency(struct gemtek_pci *card, unsigned long frequency)
-{
- int i;
- u32 value = frequency / 200 + 856;
- u16 mask = 0x8000;
- u8 last_byte;
- u32 port = card->iobase;
-
- mutex_lock(&card->lock);
- card->current_frequency = frequency;
- last_byte = gemtek_pci_out(0x06, port);
-
- i = 0;
- do {
- gemtek_pci_nil(port, &last_byte);
- i++;
- } while (i < 9);
-
- i = 0;
- do {
- gemtek_pci_cmd(value & mask, port, &last_byte);
- mask >>= 1;
- i++;
- } while (i < 16);
-
- outw(0x10, port);
- mutex_unlock(&card->lock);
-}
-
-
-static void gemtek_pci_mute(struct gemtek_pci *card)
-{
- mutex_lock(&card->lock);
- outb(0x1f, card->iobase);
- card->mute = true;
- mutex_unlock(&card->lock);
-}
-
-static void gemtek_pci_unmute(struct gemtek_pci *card)
-{
- if (card->mute) {
- gemtek_pci_setfrequency(card, card->current_frequency);
- card->mute = false;
- }
-}
-
-static int gemtek_pci_getsignal(struct gemtek_pci *card)
-{
- int sig;
-
- mutex_lock(&card->lock);
- sig = (inb(card->iobase) & 0x08) ? 0 : 1;
- mutex_unlock(&card->lock);
- return sig;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *v)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
- strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
- snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(card->pdev));
- v->version = RADIO_VERSION;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (v->index > 0)
- return -EINVAL;
-
- strlcpy(v->name, "FM", sizeof(v->name));
- v->type = V4L2_TUNER_RADIO;
- v->rangelow = GEMTEK_PCI_RANGE_LOW;
- v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
- v->rxsubchans = V4L2_TUNER_SUB_MONO;
- v->capability = V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff * gemtek_pci_getsignal(card);
- return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- return v->index ? -EINVAL : 0;
-}
-
-static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
- return -EINVAL;
- if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
- f->frequency > GEMTEK_PCI_RANGE_HIGH)
- return -EINVAL;
- gemtek_pci_setfrequency(card, f->frequency);
- card->mute = false;
- return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (f->tuner != 0)
- return -EINVAL;
- f->type = V4L2_TUNER_RADIO;
- f->frequency = card->current_frequency;
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535, 65535);
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = card->mute;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (card->mute)
- ctrl->value = 0;
- else
- ctrl->value = 65535;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value)
- gemtek_pci_mute(card);
- else
- gemtek_pci_unmute(card);
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (ctrl->value)
- gemtek_pci_unmute(card);
- else
- gemtek_pci_mute(card);
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
-}
-
-enum {
- GEMTEK_PR103
-};
-
-static char *card_names[] __devinitdata = {
- "GEMTEK_PR103"
-};
-
-static struct pci_device_id gemtek_pci_id[] =
-{
- { PCI_VENDOR_ID_GEMTEK, PCI_DEVICE_ID_GEMTEK_PR103,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, GEMTEK_PR103 },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, gemtek_pci_id);
-
-static const struct v4l2_file_operations gemtek_pci_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
-};
-
-static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
- struct gemtek_pci *card;
- struct v4l2_device *v4l2_dev;
- int res;
-
- card = kzalloc(sizeof(struct gemtek_pci), GFP_KERNEL);
- if (card == NULL) {
- dev_err(&pdev->dev, "out of memory\n");
- return -ENOMEM;
- }
-
- v4l2_dev = &card->v4l2_dev;
- mutex_init(&card->lock);
- card->pdev = pdev;
-
- strlcpy(v4l2_dev->name, "gemtek_pci", sizeof(v4l2_dev->name));
-
- res = v4l2_device_register(&pdev->dev, v4l2_dev);
- if (res < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- kfree(card);
- return res;
- }
-
- if (pci_enable_device(pdev))
- goto err_pci;
-
- card->iobase = pci_resource_start(pdev, 0);
- card->length = pci_resource_len(pdev, 0);
-
- if (request_region(card->iobase, card->length, card_names[pci_id->driver_data]) == NULL) {
- v4l2_err(v4l2_dev, "i/o port already in use\n");
- goto err_pci;
- }
-
- strlcpy(card->vdev.name, v4l2_dev->name, sizeof(card->vdev.name));
- card->vdev.v4l2_dev = v4l2_dev;
- card->vdev.fops = &gemtek_pci_fops;
- card->vdev.ioctl_ops = &gemtek_pci_ioctl_ops;
- card->vdev.release = video_device_release_empty;
- video_set_drvdata(&card->vdev, card);
-
- gemtek_pci_mute(card);
-
- if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0)
- goto err_video;
-
- v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
- pdev->revision, card->iobase, card->iobase + card->length - 1);
-
- return 0;
-
-err_video:
- release_region(card->iobase, card->length);
-
-err_pci:
- v4l2_device_unregister(v4l2_dev);
- kfree(card);
- return -ENODEV;
-}
-
-static void __devexit gemtek_pci_remove(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct gemtek_pci *card = to_gemtek_pci(v4l2_dev);
-
- video_unregister_device(&card->vdev);
- v4l2_device_unregister(v4l2_dev);
-
- release_region(card->iobase, card->length);
-
- if (mx)
- gemtek_pci_mute(card);
-
- kfree(card);
-}
-
-static struct pci_driver gemtek_pci_driver = {
- .name = "gemtek_pci",
- .id_table = gemtek_pci_id,
- .probe = gemtek_pci_probe,
- .remove = __devexit_p(gemtek_pci_remove),
-};
-
-static int __init gemtek_pci_init(void)
-{
- return pci_register_driver(&gemtek_pci_driver);
-}
-
-static void __exit gemtek_pci_exit(void)
-{
- pci_unregister_driver(&gemtek_pci_driver);
-}
-
-module_init(gemtek_pci_init);
-module_exit(gemtek_pci_exit);
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 6459a220b0dd..5c2a9058c09f 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -77,8 +77,8 @@ MODULE_PARM_DESC(debug, "activates debug info");
/* TEA5757 pin mappings */
static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
-#define FREQ_LO (50 * 16000)
-#define FREQ_HI (150 * 16000)
+#define FREQ_LO (87 * 16000)
+#define FREQ_HI (108 * 16000)
#define FREQ_IF 171200 /* 10.7*16000 */
#define FREQ_STEP 200 /* 12.5*16 */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index dd6bd364efa0..7ecc8e657663 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@ static const struct v4l2_file_operations wl1273_fops = {
.read = wl1273_fm_fops_read,
.write = wl1273_fm_fops_write,
.poll = wl1273_fm_fops_poll,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.open = wl1273_fm_fops_open,
.release = wl1273_fm_fops_release,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index ac76dfe5b3fa..60c176fe328e 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -357,7 +357,8 @@ int si470x_start(struct si470x_device *radio)
goto done;
/* sysconfig 1 */
- radio->registers[SYSCONFIG1] = SYSCONFIG1_DE;
+ radio->registers[SYSCONFIG1] =
+ (de << 11) & SYSCONFIG1_DE; /* DE */
retval = si470x_set_register(radio, SYSCONFIG1);
if (retval < 0)
goto done;
@@ -687,12 +688,8 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
/* driver constants */
strcpy(tuner->name, "FM");
tuner->type = V4L2_TUNER_RADIO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-#else
- tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
-#endif
/* range limits */
switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
@@ -718,12 +715,10 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
else
tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
/* If there is a reliable method of detecting an RDS channel,
then this code should check for that before setting this
RDS subchannel. */
tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-#endif
/* mono/stereo selector */
if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 80b3c319f698..1ac49139158d 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -446,27 +446,27 @@ static void ene_rx_setup(struct ene_device *dev)
select_timeout:
if (dev->rx_fan_input_inuse) {
- dev->rdev->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
+ dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
/* Fan input doesn't support timeouts, it just ends the
input with a maximum sample */
dev->rdev->min_timeout = dev->rdev->max_timeout =
- MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+ US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
ENE_FW_SAMPLE_PERIOD_FAN);
} else {
- dev->rdev->rx_resolution = MS_TO_NS(sample_period);
+ dev->rdev->rx_resolution = US_TO_NS(sample_period);
/* Theoreticly timeout is unlimited, but we cap it
* because it was seen that on one device, it
* would stop sending spaces after around 250 msec.
* Besides, this is close to 2^32 anyway and timeout is u32.
*/
- dev->rdev->min_timeout = MS_TO_NS(127 * sample_period);
- dev->rdev->max_timeout = MS_TO_NS(200000);
+ dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
+ dev->rdev->max_timeout = US_TO_NS(200000);
}
if (dev->hw_learning_and_tx_capable)
- dev->rdev->tx_resolution = MS_TO_NS(sample_period);
+ dev->rdev->tx_resolution = US_TO_NS(sample_period);
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
@@ -801,7 +801,7 @@ static irqreturn_t ene_isr(int irq, void *data)
dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
- ev.duration = MS_TO_NS(hw_sample);
+ ev.duration = US_TO_NS(hw_sample);
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
@@ -821,7 +821,7 @@ static void ene_setup_default_settings(struct ene_device *dev)
dev->learning_mode_enabled = learning_mode_force;
/* Set reasonable default timeout */
- dev->rdev->timeout = MS_TO_NS(150000);
+ dev->rdev->timeout = US_TO_NS(150000);
}
/* Upload all hardware settings at once. Used at load and resume time */
@@ -1004,6 +1004,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* validate resources */
error = -ENODEV;
+ /* init these to -1, as 0 is valid for both */
+ dev->hw_io = -1;
+ dev->irq = -1;
+
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto error;
@@ -1072,6 +1076,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
rdev->input_name = "ENE eHome Infrared Remote Transceiver";
}
+ dev->rdev = rdev;
+
ene_rx_setup_hw_buffer(dev);
ene_setup_default_settings(dev);
ene_setup_hw_settings(dev);
@@ -1083,7 +1089,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (error < 0)
goto error;
- dev->rdev = rdev;
ene_notice("driver has been succesfully loaded");
return 0;
error:
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index c179baf34cb4..337a41d4450b 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -201,8 +201,6 @@
#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__)
-#define MS_TO_NS(msec) ((msec) * 1000)
-
struct ene_device {
struct pnp_dev *pnp_dev;
struct rc_dev *rdev;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6811512b4e83..e7dc6b46fdfa 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -988,7 +988,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
- bool pad_mouse;
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
@@ -1000,29 +999,20 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
case RC_TYPE_RC6:
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
- pad_mouse = false;
break;
case RC_TYPE_UNKNOWN:
case RC_TYPE_OTHER:
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
- if (pad_stabilize && !nomouse)
- pad_mouse = true;
- else {
+ if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
- pad_mouse = false;
- }
/* ir_proto_packet[0] = 0x00; // already the default */
rc_type = RC_TYPE_OTHER;
break;
default:
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
- if (pad_stabilize && !nomouse)
- pad_mouse = true;
- else {
+ if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
- pad_mouse = false;
- }
/* ir_proto_packet[0] = 0x00; // already the default */
rc_type = RC_TYPE_OTHER;
break;
@@ -1035,7 +1025,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
goto out;
ictx->rc_type = rc_type;
- ictx->pad_mouse = pad_mouse;
+ ictx->pad_mouse = false;
out:
return retval;
@@ -1517,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
} else {
- ictx->pad_mouse = 0;
+ ictx->pad_mouse = false;
dev_dbg(dev, "mouse mode disabled, passing key value\n");
}
}
@@ -1756,7 +1746,6 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
ictx->display_type = detected_display_type;
- ictx->rdev->allowed_protos = allowed_protos;
ictx->rc_type = allowed_protos;
}
@@ -1839,10 +1828,6 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
- if (ictx->rc_type == RC_TYPE_RC6)
- rdev->map_name = RC_MAP_IMON_MCE;
- else
- rdev->map_name = RC_MAP_IMON_PAD;
/* Enable front-panel buttons and/or knobs */
memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
@@ -1851,11 +1836,18 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
if (ret)
dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
- if (ictx->product == 0xffdc)
+ if (ictx->product == 0xffdc) {
imon_get_ffdc_type(ictx);
+ rdev->allowed_protos = ictx->rc_type;
+ }
imon_set_display_type(ictx);
+ if (ictx->rc_type == RC_TYPE_RC6)
+ rdev->map_name = RC_MAP_IMON_MCE;
+ else
+ rdev->map_name = RC_MAP_IMON_PAD;
+
ret = rc_register_device(rdev);
if (ret < 0) {
dev_err(ictx->dev, "remote input dev register failed\n");
@@ -2108,18 +2100,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto find_endpoint_failed;
}
- ictx->idev = imon_init_idev(ictx);
- if (!ictx->idev) {
- dev_err(dev, "%s: input device setup failed\n", __func__);
- goto idev_setup_failed;
- }
-
- ictx->rdev = imon_init_rdev(ictx);
- if (!ictx->rdev) {
- dev_err(dev, "%s: rc device setup failed\n", __func__);
- goto rdev_setup_failed;
- }
-
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -2133,13 +2113,25 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto urb_submit_failed;
}
+ ictx->idev = imon_init_idev(ictx);
+ if (!ictx->idev) {
+ dev_err(dev, "%s: input device setup failed\n", __func__);
+ goto idev_setup_failed;
+ }
+
+ ictx->rdev = imon_init_rdev(ictx);
+ if (!ictx->rdev) {
+ dev_err(dev, "%s: rc device setup failed\n", __func__);
+ goto rdev_setup_failed;
+ }
+
return ictx;
-urb_submit_failed:
- rc_unregister_device(ictx->rdev);
rdev_setup_failed:
input_unregister_device(ictx->idev);
idev_setup_failed:
+ usb_kill_urb(ictx->rx_urb_intf0);
+urb_submit_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
usb_free_urb(tx_urb);
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 185baddcbf14..73230ff93b8a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
/* used internally by the sysfs interface */
u64
-ir_raw_get_allowed_protocols()
+ir_raw_get_allowed_protocols(void)
{
u64 protocols;
mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index c59851b203da..7a5f5300caf9 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -19,35 +19,35 @@
static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Pixelview SBTVD remote */
- { 0x8613, KEY_MUTE },
- { 0x8612, KEY_POWER },
- { 0x8601, KEY_1 },
- { 0x8602, KEY_2 },
- { 0x8603, KEY_3 },
- { 0x8604, KEY_4 },
- { 0x8605, KEY_5 },
- { 0x8606, KEY_6 },
- { 0x8607, KEY_7 },
- { 0x8608, KEY_8 },
- { 0x8609, KEY_9 },
- { 0x8600, KEY_0 },
- { 0x860d, KEY_CHANNELUP },
- { 0x8619, KEY_CHANNELDOWN },
- { 0x8610, KEY_VOLUMEUP },
- { 0x860c, KEY_VOLUMEDOWN },
+ { 0x866b13, KEY_MUTE },
+ { 0x866b12, KEY_POWER },
+ { 0x866b01, KEY_1 },
+ { 0x866b02, KEY_2 },
+ { 0x866b03, KEY_3 },
+ { 0x866b04, KEY_4 },
+ { 0x866b05, KEY_5 },
+ { 0x866b06, KEY_6 },
+ { 0x866b07, KEY_7 },
+ { 0x866b08, KEY_8 },
+ { 0x866b09, KEY_9 },
+ { 0x866b00, KEY_0 },
+ { 0x866b0d, KEY_CHANNELUP },
+ { 0x866b19, KEY_CHANNELDOWN },
+ { 0x866b10, KEY_VOLUMEUP },
+ { 0x866b0c, KEY_VOLUMEDOWN },
- { 0x860a, KEY_CAMERA },
- { 0x860b, KEY_ZOOM },
- { 0x861b, KEY_BACKSPACE },
- { 0x8615, KEY_ENTER },
+ { 0x866b0a, KEY_CAMERA },
+ { 0x866b0b, KEY_ZOOM },
+ { 0x866b1b, KEY_BACKSPACE },
+ { 0x866b15, KEY_ENTER },
- { 0x861d, KEY_UP },
- { 0x861e, KEY_DOWN },
- { 0x860e, KEY_LEFT },
- { 0x860f, KEY_RIGHT },
+ { 0x866b1d, KEY_UP },
+ { 0x866b1e, KEY_DOWN },
+ { 0x866b0e, KEY_LEFT },
+ { 0x866b0f, KEY_RIGHT },
- { 0x8618, KEY_RECORD },
- { 0x861a, KEY_STOP },
+ { 0x866b18, KEY_RECORD },
+ { 0x866b1a, KEY_STOP },
/* Key codes for the EvolutePC TVWay+ remote */
{ 0x7a00, KEY_MENU },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0fef6efad537..079353e5d558 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -48,7 +48,6 @@
#define USB_BUFLEN 32 /* USB reception buffer length */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
-#define MS_TO_NS(msec) ((msec) * 1000)
/* MCE constants */
#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
@@ -858,7 +857,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--;
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
- * MS_TO_NS(MCE_TIME_UNIT);
+ * MS_TO_US(MCE_TIME_UNIT);
dev_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index eb875af05e79..aa021600e9df 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -78,7 +78,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_HELPER_CHIPS_AUTO
bool "Autoselect pertinent encoders/decoders and other helper chips"
- default y if !EMBEDDED
+ default y if !EXPERT
---help---
Most video cards may require additional modules to encode or
decode audio/video standards. This option will autoselect
@@ -141,15 +141,6 @@ config VIDEO_TDA9840
To compile this driver as a module, choose M here: the
module will be called tda9840.
-config VIDEO_TDA9875
- tristate "Philips TDA9875 audio processor"
- depends on VIDEO_V4L2 && I2C
- ---help---
- Support for tda9875 audio decoder chip found on some bt8xx boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tda9875.
-
config VIDEO_TEA6415C
tristate "Philips TEA6415C audio processor"
depends on I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 81e38cb0b846..a509d317e258 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
-obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f318b51448b3..d2327dbb473f 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -303,11 +303,22 @@ static int adv7175_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0);
}
+static int adv7175_s_power(struct v4l2_subdev *sd, int on)
+{
+ if (on)
+ adv7175_write(sd, 0x01, 0x00);
+ else
+ adv7175_write(sd, 0x01, 0x78);
+
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops adv7175_core_ops = {
.g_chip_ident = adv7175_g_chip_ident,
.init = adv7175_init,
+ .s_power = adv7175_s_power,
};
static const struct v4l2_subdev_video_ops adv7175_video_ops = {
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 49efcf660ba6..7f58756d72c8 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -1373,7 +1373,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x1800,
.audio_mode_gpio= fv2000s_audio,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1511,7 +1510,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x09,
.needs_tvaudio = 1,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1550,7 +1548,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask2 = 0x07ff,
.muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.muxsel_hook = rv605_muxsel,
@@ -1686,7 +1683,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x0_848] = {
@@ -1699,7 +1695,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1714,7 +1709,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x1] = {
@@ -1727,7 +1721,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x1_SVID] = {
@@ -1740,7 +1733,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY2xx] = {
@@ -1753,7 +1745,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1768,7 +1759,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY2x0] = {
@@ -1781,7 +1771,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY500] = {
@@ -1794,7 +1783,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY540] = {
@@ -1805,7 +1793,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1820,7 +1807,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1, /* must avoid, conflicts with the bt860 */
},
[BTTV_BOARD_IDS_EAGLE] = {
@@ -1835,7 +1821,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2),
.muxsel_hook = eagle_muxsel,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.pll = PLL_28,
},
[BTTV_BOARD_PINNACLESAT] = {
@@ -1846,7 +1831,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.muxsel = MUXSEL(3, 1),
.pll = PLL_28,
@@ -1897,7 +1881,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.muxsel = MUXSEL(2, 0, 1),
.pll = PLL_28,
@@ -1970,7 +1953,6 @@ struct tvcard bttv_tvcards[] = {
/* Tuner, CVid, SVid, CVid over SVid connector */
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomask = 0,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -2017,7 +1999,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0),
.muxsel_hook = xguard_muxsel,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
},
@@ -2029,7 +2010,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS,
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2134,7 +2114,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS, /* card has no svhs */
.needs_tvaudio = 0,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.gpiomask = 0x00,
.muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
@@ -2156,7 +2135,6 @@ struct tvcard bttv_tvcards[] = {
[BTTV_BOARD_TWINHAN_DST] = {
.name = "Twinhan DST + clones",
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2171,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
/* Vid In, SVid In, Vid over SVid in connector */
.muxsel = MUXSEL(3, 1, 1, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2226,7 +2203,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS,
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
@@ -2278,7 +2254,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
/*878A input is always MUX0, see above.*/
.muxsel = MUXSEL(2, 2, 2, 2),
@@ -2302,7 +2277,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
},
[BTTV_BOARD_AVDVBT_771] = {
/* Wolfram Joost <wojo@frokaschwei.de> */
@@ -2313,7 +2287,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.muxsel = MUXSEL(3, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.has_dvb = 1,
@@ -2329,7 +2302,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 1,
.muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2393,7 +2365,6 @@ struct tvcard bttv_tvcards[] = {
/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
.name = "DViCO FusionHDTV DVB-T Lite",
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.no_video = 1,
@@ -2440,7 +2411,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
.pll = PLL_28,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2478,7 +2448,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = kodicom4400r_muxsel,
},
[BTTV_BOARD_KODICOM_4400R_SL] = {
@@ -2500,7 +2469,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = kodicom4400r_muxsel,
},
/* ---- card 0x86---------------------------------- */
@@ -2530,7 +2498,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x00400005, 0, 0x00000001, 0 },
.gpiomute = 0x00c00007,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.has_dvb = 1,
},
@@ -2630,7 +2597,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
/* ---- card 0x8d ---------------------------------- */
@@ -2658,7 +2624,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 100000, 100002, 100002, 100000 },
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_TNF_5335MF,
@@ -2674,7 +2639,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x0f, /* old: 7 */
.muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2732,7 +2696,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x00400005, 0, 0x00000001, 0 },
.gpiomute = 0x00c00007,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
/* ---- card 0x95---------------------------------- */
@@ -2874,7 +2837,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = gv800s_muxsel,
},
[BTTV_BOARD_GEOVISION_GV800S_SL] = {
@@ -2899,7 +2861,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = gv800s_muxsel,
},
[BTTV_BOARD_PV183] = {
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index fd62bf15d779..c6333595c6b9 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -234,7 +234,6 @@ struct tvcard {
/* i2c audio flags */
unsigned int no_msp34xx:1;
- unsigned int no_tda9875:1;
unsigned int no_tda7432:1;
unsigned int needs_tvaudio:1;
unsigned int msp34xx_alt:1;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 49f1b8f1418e..55ffd60ffa7f 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2001,6 +2001,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
.min_width = 320,
.min_height = 240,
};
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42,
+ .platform_data = &sensor_cfg,
+ };
/*
* Start putting together one of our big camera structures.
@@ -2062,9 +2067,9 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (dmi_check_system(olpc_xo1_dmi))
sensor_cfg.clock_speed = 45;
- cam->sensor_addr = 0x42;
- cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
- "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
+ &ov7670_info, NULL);
if (cam->sensor == NULL) {
ret = -ENODEV;
goto out_smbus;
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 916c13d5cf7d..6d6d1843791c 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -378,7 +378,7 @@ struct cpia2_fh {
struct camera_data {
/* locks */
- struct mutex busy_lock; /* guard against SMP multithreading */
+ struct mutex v4l2_lock; /* serialize file operations */
struct v4l2_prio_state prio;
/* camera status */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 9606bc01b803..aaffca8e13fd 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -2247,7 +2247,7 @@ struct camera_data *cpia2_init_camera_struct(void)
cam->present = 1;
- mutex_init(&cam->busy_lock);
+ mutex_init(&cam->v4l2_lock);
init_waitqueue_head(&cam->wq_stream);
return cam;
@@ -2365,9 +2365,9 @@ long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock)
{
struct framebuf *frame;
- if (!count) {
+
+ if (!count)
return 0;
- }
if (!buf) {
ERR("%s: buffer NULL\n",__func__);
@@ -2379,17 +2379,12 @@ long cpia2_read(struct camera_data *cam,
return -EINVAL;
}
- /* make this _really_ smp and multithread-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
if (!cam->present) {
LOG("%s: camera removed\n",__func__);
- mutex_unlock(&cam->busy_lock);
return 0; /* EOF */
}
- if(!cam->streaming) {
+ if (!cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
@@ -2398,42 +2393,31 @@ long cpia2_read(struct camera_data *cam,
/* Copy cam->curbuff in case it changes while we're processing */
frame = cam->curbuff;
if (noblock && frame->status != FRAME_READY) {
- mutex_unlock(&cam->busy_lock);
return -EAGAIN;
}
- if(frame->status != FRAME_READY) {
- mutex_unlock(&cam->busy_lock);
+ if (frame->status != FRAME_READY) {
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->present ||
(frame = cam->curbuff)->status == FRAME_READY);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- /* make this _really_ smp and multithread-safe */
- if (mutex_lock_interruptible(&cam->busy_lock)) {
- return -ERESTARTSYS;
- }
- if(!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return 0;
- }
}
/* copy data to user space */
- if (frame->length > count) {
- mutex_unlock(&cam->busy_lock);
+ if (frame->length > count)
return -EFAULT;
- }
- if (copy_to_user(buf, frame->data, frame->length)) {
- mutex_unlock(&cam->busy_lock);
+ if (copy_to_user(buf, frame->data, frame->length))
return -EFAULT;
- }
count = frame->length;
frame->status = FRAME_EMPTY;
- mutex_unlock(&cam->busy_lock);
return count;
}
@@ -2447,17 +2431,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
{
unsigned int status=0;
- if(!cam) {
+ if (!cam) {
ERR("%s: Internal error, camera_data not found!\n",__func__);
return POLLERR;
}
- mutex_lock(&cam->busy_lock);
-
- if(!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return POLLHUP;
- }
if(!cam->streaming) {
/* Start streaming */
@@ -2465,16 +2445,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
cam->params.camera_state.stream_mode);
}
- mutex_unlock(&cam->busy_lock);
poll_wait(filp, &cam->wq_stream, wait);
- mutex_lock(&cam->busy_lock);
if(!cam->present)
status = POLLHUP;
else if(cam->curbuff->status == FRAME_READY)
status = POLLIN | POLLRDNORM;
- mutex_unlock(&cam->busy_lock);
return status;
}
@@ -2496,29 +2473,19 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
DBG("mmap offset:%ld size:%ld\n", start_offset, size);
- /* make this _really_ smp-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if (!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return -ENODEV;
- }
if (size > cam->frame_size*cam->num_frames ||
(start_offset % cam->frame_size) != 0 ||
- (start_offset+size > cam->frame_size*cam->num_frames)) {
- mutex_unlock(&cam->busy_lock);
+ (start_offset+size > cam->frame_size*cam->num_frames))
return -EINVAL;
- }
pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
while (size > 0) {
page = kvirt_to_pa(pos);
- if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) {
- mutex_unlock(&cam->busy_lock);
+ if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
- }
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
@@ -2528,7 +2495,5 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
}
cam->mmapped = true;
- mutex_unlock(&cam->busy_lock);
return 0;
}
-
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 7edf80b0d01a..9bad39842936 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -238,59 +238,40 @@ static struct v4l2_queryctrl controls[] = {
static int cpia2_open(struct file *file)
{
struct camera_data *cam = video_drvdata(file);
- int retval = 0;
+ struct cpia2_fh *fh;
if (!cam) {
ERR("Internal error, camera_data not found!\n");
return -ENODEV;
}
- if(mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if(!cam->present) {
- retval = -ENODEV;
- goto err_return;
- }
+ if (!cam->present)
+ return -ENODEV;
- if (cam->open_count > 0) {
- goto skip_init;
- }
+ if (cam->open_count == 0) {
+ if (cpia2_allocate_buffers(cam))
+ return -ENOMEM;
- if (cpia2_allocate_buffers(cam)) {
- retval = -ENOMEM;
- goto err_return;
- }
+ /* reset the camera */
+ if (cpia2_reset_camera(cam) < 0)
+ return -EIO;
- /* reset the camera */
- if (cpia2_reset_camera(cam) < 0) {
- retval = -EIO;
- goto err_return;
+ cam->APP_len = 0;
+ cam->COM_len = 0;
}
- cam->APP_len = 0;
- cam->COM_len = 0;
-
-skip_init:
- {
- struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL);
- if(!fh) {
- retval = -ENOMEM;
- goto err_return;
- }
- file->private_data = fh;
- fh->prio = V4L2_PRIORITY_UNSET;
- v4l2_prio_open(&cam->prio, &fh->prio);
- fh->mmapped = 0;
- }
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+ file->private_data = fh;
+ fh->prio = V4L2_PRIORITY_UNSET;
+ v4l2_prio_open(&cam->prio, &fh->prio);
+ fh->mmapped = 0;
++cam->open_count;
cpia2_dbg_dump_registers(cam);
-
-err_return:
- mutex_unlock(&cam->busy_lock);
- return retval;
+ return 0;
}
/******************************************************************************
@@ -304,15 +285,11 @@ static int cpia2_close(struct file *file)
struct camera_data *cam = video_get_drvdata(dev);
struct cpia2_fh *fh = file->private_data;
- mutex_lock(&cam->busy_lock);
-
if (cam->present &&
- (cam->open_count == 1
- || fh->prio == V4L2_PRIORITY_RECORD
- )) {
+ (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
cpia2_usb_stream_stop(cam);
- if(cam->open_count == 1) {
+ if (cam->open_count == 1) {
/* save camera state for later open */
cpia2_save_camera_state(cam);
@@ -321,26 +298,21 @@ static int cpia2_close(struct file *file)
}
}
- {
- if(fh->mmapped)
- cam->mmapped = 0;
- v4l2_prio_close(&cam->prio, fh->prio);
- file->private_data = NULL;
- kfree(fh);
- }
+ if (fh->mmapped)
+ cam->mmapped = 0;
+ v4l2_prio_close(&cam->prio, fh->prio);
+ file->private_data = NULL;
+ kfree(fh);
if (--cam->open_count == 0) {
cpia2_free_buffers(cam);
if (!cam->present) {
video_unregister_device(dev);
- mutex_unlock(&cam->busy_lock);
kfree(cam);
return 0;
}
}
- mutex_unlock(&cam->busy_lock);
-
return 0;
}
@@ -405,11 +377,11 @@ static int sync(struct camera_data *cam, int frame_nr)
return 0;
}
- mutex_unlock(&cam->busy_lock);
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->streaming ||
frame->status == FRAME_READY);
- mutex_lock(&cam->busy_lock);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
if(!cam->present)
@@ -1293,11 +1265,11 @@ static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file)
if(frame < 0) {
/* Wait for a frame to become available */
struct framebuf *cb=cam->curbuff;
- mutex_unlock(&cam->busy_lock);
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->present ||
(cb=cam->curbuff)->status == FRAME_READY);
- mutex_lock(&cam->busy_lock);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
if(!cam->present)
@@ -1337,14 +1309,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (!cam)
return -ENOTTY;
- /* make this _really_ smp-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if (!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return -ENODEV;
- }
/* Priority check */
switch (cmd) {
@@ -1352,10 +1318,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct cpia2_fh *fh = file->private_data;
retval = v4l2_prio_check(&cam->prio, fh->prio);
- if(retval) {
- mutex_unlock(&cam->busy_lock);
+ if (retval)
return retval;
- }
break;
}
default:
@@ -1529,7 +1493,6 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- mutex_unlock(&cam->busy_lock);
return retval;
}
@@ -1596,7 +1559,7 @@ static const struct v4l2_file_operations cpia2_fops = {
.release = cpia2_close,
.read = cpia2_v4l_read,
.poll = cpia2_v4l_poll,
- .ioctl = cpia2_ioctl,
+ .unlocked_ioctl = cpia2_ioctl,
.mmap = cpia2_mmap,
};
@@ -1620,6 +1583,7 @@ int cpia2_register_camera(struct camera_data *cam)
memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
video_set_drvdata(cam->vdev, cam);
+ cam->vdev->lock = &cam->v4l2_lock;
reset_camera_struct_v4l(cam);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 133ec2bac180..944af8adbe0c 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -664,7 +664,7 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
{
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
- cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+ cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
if (cx->in_work_queue == NULL) {
CX18_ERR("Unable to create incoming mailbox handler thread\n");
return -ENOMEM;
@@ -672,18 +672,6 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
return 0;
}
-static int __devinit cx18_create_out_workq(struct cx18 *cx)
-{
- snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
- cx->v4l2_dev.name);
- cx->out_work_queue = create_workqueue(cx->out_workq_name);
- if (cx->out_work_queue == NULL) {
- CX18_ERR("Unable to create outgoing mailbox handler threads\n");
- return -ENOMEM;
- }
- return 0;
-}
-
static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
{
int i;
@@ -710,15 +698,9 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
mutex_init(&cx->epu2apu_mb_lock);
mutex_init(&cx->epu2cpu_mb_lock);
- ret = cx18_create_out_workq(cx);
- if (ret)
- return ret;
-
ret = cx18_create_in_workq(cx);
- if (ret) {
- destroy_workqueue(cx->out_work_queue);
+ if (ret)
return ret;
- }
cx18_init_in_work_orders(cx);
@@ -1107,7 +1089,6 @@ free_mem:
release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueues:
destroy_workqueue(cx->in_work_queue);
- destroy_workqueue(cx->out_work_queue);
err:
if (retval == 0)
retval = -ENODEV;
@@ -1259,7 +1240,6 @@ static void cx18_remove(struct pci_dev *pci_dev)
cx18_halt_firmware(cx);
destroy_workqueue(cx->in_work_queue);
- destroy_workqueue(cx->out_work_queue);
cx18_streams_cleanup(cx, 1);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index f6f3e50d4bdf..306caac6d3fc 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -617,9 +617,6 @@ struct cx18 {
struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
- struct workqueue_struct *out_work_queue;
- char out_workq_name[12]; /* "cx18-NN-out" */
-
/* i2c */
struct i2c_adapter i2c_adap[2];
struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 51765eb12d39..713b0e61536d 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -42,8 +42,7 @@ static inline bool cx18_stream_enabled(struct cx18_stream *s)
/* Related to submission of mdls to firmware */
static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
{
- struct cx18 *cx = s->cx;
- queue_work(cx->out_work_queue, &s->out_work_order);
+ schedule_work(&s->out_work_order);
}
static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index fe59a1c3f064..363aa6004221 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -28,7 +28,6 @@
#include <media/videobuf-vmalloc.h>
#include "xc5000.h"
-#include "dvb_dummy_fe.h"
#include "s5h1432.h"
#include "tda18271.h"
#include "s5h1411.h"
@@ -619,7 +618,7 @@ static int dvb_init(struct cx231xx *dev)
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -665,7 +664,7 @@ static int dvb_init(struct cx231xx *dev)
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f16461844c5c..6fc09dd41b9d 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1682,20 +1682,6 @@ static int cx25840_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
-{
- struct cx25840_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (platform_data) {
- struct cx25840_platform_data *pdata = platform_data;
-
- state->pvr150_workaround = pdata->pvr150_workaround;
- set_input(client, state->vid_input, state->aud_input);
- }
- return 0;
-}
-
static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
bool *handled)
{
@@ -1787,7 +1773,6 @@ static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.log_status = cx25840_log_status,
- .s_config = cx25840_s_config,
.g_chip_ident = cx25840_g_chip_ident,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
@@ -1974,7 +1959,6 @@ static int cx25840_probe(struct i2c_client *client,
state->vid_input = CX25840_COMPOSITE7;
state->aud_input = CX25840_AUDIO8;
state->audclk_freq = 48000;
- state->pvr150_workaround = 0;
state->audmode = V4L2_TUNER_MODE_LANG1;
state->vbi_line_offset = 8;
state->id = id;
@@ -2034,6 +2018,12 @@ static int cx25840_probe(struct i2c_client *client,
v4l2_ctrl_cluster(2, &state->volume);
v4l2_ctrl_handler_setup(&state->hdl);
+ if (client->dev.platform_data) {
+ struct cx25840_platform_data *pdata = client->dev.platform_data;
+
+ state->pvr150_workaround = pdata->pvr150_workaround;
+ }
+
cx25840_ir_probe(sd);
return 0;
}
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 1f532e31cd49..9f3bfc1eb240 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -41,6 +41,183 @@ spinlock_t vpif_lock;
void __iomem *vpif_base;
+/**
+ * ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params ch_params[] = {
+ /* HDTV formats */
+ {
+ .name = "480p59_94",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 138-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 43,
+ .l5 = 523,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_480P59_94,
+ },
+ {
+ .name = "576p50",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 144-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 45,
+ .l5 = 621,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_576P50,
+ },
+ {
+ .name = "720p50",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 700-8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_720P50,
+ },
+ {
+ .name = "720p60",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 370 - 8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_720P60,
+ },
+ {
+ .name = "1080I50",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 720 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080I50,
+ },
+ {
+ .name = "1080I60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080I60,
+ },
+ {
+ .name = "1080p60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 42,
+ .l5 = 1122,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080P60,
+ },
+
+ /* SDTV formats */
+ {
+ .name = "NTSC_M",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 268,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 263,
+ .l7 = 266,
+ .l9 = 286,
+ .l11 = 525,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_525_60,
+ },
+ {
+ .name = "PAL_BDGHIK",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 280,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 311,
+ .l7 = 313,
+ .l9 = 336,
+ .l11 = 624,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_625_50,
+ },
+};
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
+
static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
{
if (val)
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index ebd5c4338ebb..10550bd93b06 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -577,12 +577,10 @@ struct vpif_channel_config_params {
char name[VPIF_MAX_NAME]; /* Name of the mode */
u16 width; /* Indicates width of the image */
u16 height; /* Indicates height of the image */
- u8 fps;
- u8 frm_fmt; /* Indicates whether this is interlaced
- * or progressive format */
- u8 ycmux_mode; /* Indicates whether this mode requires
- * single or two channels */
- u16 eav2sav; /* length of sav 2 eav */
+ u8 frm_fmt; /* Interlaced (0) or progressive (1) */
+ u8 ycmux_mode; /* This mode requires one (0) or two (1)
+ channels */
+ u16 eav2sav; /* length of eav 2 sav */
u16 sav2eav; /* length of sav 2 eav */
u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */
u16 vsize; /* Vertical size of the image */
@@ -590,10 +588,14 @@ struct vpif_channel_config_params {
* is in BT or in CCD/CMOS */
u8 vbi_supported; /* Indicates whether this mode
* supports capturing vbi or not */
- u8 hd_sd;
- v4l2_std_id stdid;
+ u8 hd_sd; /* HDTV (1) or SDTV (0) format */
+ v4l2_std_id stdid; /* SDTV format */
+ u32 dv_preset; /* HDTV format */
};
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params ch_params[];
+
struct vpif_video_params;
struct vpif_params;
struct vpif_vbi_params;
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 193abab6b355..d93ad74a34c5 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include "vpif_capture.h"
#include "vpif.h"
@@ -81,20 +82,6 @@ static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
/**
- * ch_params: video standard configuration parameters for vpif
- */
-static const struct vpif_channel_config_params ch_params[] = {
- {
- "NTSC_M", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
- 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
- },
- {
- "PAL_BDGHIK", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
- 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
- },
-};
-
-/**
* vpif_uservirt_to_phys : translate user/virtual address to phy address
* @virtp: user/virtual address
*
@@ -342,7 +329,7 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
* @dev_id: dev_id ptr
*
* It changes status of the captured buffer, takes next buffer from the queue
- * and sets its address in VPIF registers
+ * and sets its address in VPIF registers
*/
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
@@ -435,24 +422,31 @@ static int vpif_update_std_info(struct channel_obj *ch)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpifparams = &ch->vpifparams;
const struct vpif_channel_config_params *config;
- struct vpif_channel_config_params *std_info;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
struct video_obj *vid_ch = &ch->video;
int index;
vpif_dbg(2, debug, "vpif_update_std_info\n");
- std_info = &vpifparams->std_info;
-
- for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
+ for (index = 0; index < vpif_ch_params_count; index++) {
config = &ch_params[index];
- if (config->stdid & vid_ch->stdid) {
- memcpy(std_info, config, sizeof(*config));
- break;
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (config->dv_preset == vid_ch->dv_preset) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
}
}
/* standard not found */
- if (index == ARRAY_SIZE(ch_params))
+ if (index == vpif_ch_params_count)
return -EINVAL;
common->fmt.fmt.pix.width = std_info->width;
@@ -462,6 +456,7 @@ static int vpif_update_std_info(struct channel_obj *ch)
common->fmt.fmt.pix.bytesperline = std_info->width;
vpifparams->video_params.hpitch = std_info->width;
vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
return 0;
}
@@ -757,7 +752,7 @@ static int vpif_open(struct file *filep)
struct video_obj *vid_ch;
struct channel_obj *ch;
struct vpif_fh *fh;
- int i, ret = 0;
+ int i;
vpif_dbg(2, debug, "vpif_open\n");
@@ -766,9 +761,6 @@ static int vpif_open(struct file *filep)
vid_ch = &ch->video;
common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (NULL == ch->curr_subdev_info) {
/**
* search through the sub device to see a registered
@@ -785,8 +777,7 @@ static int vpif_open(struct file *filep)
}
if (i == config->subdev_count) {
vpif_err("No sub device registered\n");
- ret = -ENOENT;
- goto exit;
+ return -ENOENT;
}
}
@@ -794,8 +785,7 @@ static int vpif_open(struct file *filep)
fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (NULL == fh) {
vpif_err("unable to allocate memory for file handle object\n");
- ret = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
/* store pointer to fh in private_data member of filep */
@@ -815,9 +805,7 @@ static int vpif_open(struct file *filep)
/* Initialize priority of this instance to default priority */
fh->prio = V4L2_PRIORITY_UNSET;
v4l2_prio_open(&ch->prio, &fh->prio);
-exit:
- mutex_unlock(&common->lock);
- return ret;
+ return 0;
}
/**
@@ -837,9 +825,6 @@ static int vpif_release(struct file *filep)
common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* if this instance is doing IO */
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
@@ -863,9 +848,6 @@ static int vpif_release(struct file *filep)
/* Decrement channel usrs counter */
ch->usrs--;
- /* unlock mutex on channel object */
- mutex_unlock(&common->lock);
-
/* Close the priority */
v4l2_prio_close(&ch->prio, fh->prio);
@@ -890,7 +872,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct channel_obj *ch = fh->channel;
struct common_obj *common;
u8 index = 0;
- int ret = 0;
vpif_dbg(2, debug, "vpif_reqbufs\n");
@@ -913,13 +894,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
common = &ch->common[index];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
- if (0 != common->io_usrs) {
- ret = -EBUSY;
- goto reqbuf_exit;
- }
+ if (0 != common->io_usrs)
+ return -EBUSY;
/* Initialize videobuf queue as per the buffer type */
videobuf_queue_dma_contig_init(&common->buffer_queue,
@@ -928,7 +904,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
reqbuf->type,
common->fmt.fmt.pix.field,
sizeof(struct videobuf_buffer), fh,
- NULL);
+ &common->lock);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -939,11 +915,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
INIT_LIST_HEAD(&common->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
- mutex_unlock(&common->lock);
- return ret;
+ return videobuf_reqbufs(&common->buffer_queue, reqbuf);
}
/**
@@ -1157,11 +1129,6 @@ static int vpif_streamon(struct file *file, void *priv,
return ret;
}
- if (mutex_lock_interruptible(&common->lock)) {
- ret = -ERESTARTSYS;
- goto streamoff_exit;
- }
-
/* If buffer queue is empty, return error */
if (list_empty(&common->dma_queue)) {
vpif_dbg(1, debug, "buffer queue is empty\n");
@@ -1240,13 +1207,10 @@ static int vpif_streamon(struct file *file, void *priv,
enable_channel1(1);
}
channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
- mutex_unlock(&common->lock);
return ret;
exit:
- mutex_unlock(&common->lock);
-streamoff_exit:
- ret = videobuf_streamoff(&common->buffer_queue);
+ videobuf_streamoff(&common->buffer_queue);
return ret;
}
@@ -1284,9 +1248,6 @@ static int vpif_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* disable channel */
if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
enable_channel0(0);
@@ -1304,8 +1265,6 @@ static int vpif_streamoff(struct file *file, void *priv,
if (ret && (ret != -ENOIOCTLCMD))
vpif_dbg(1, debug, "stream off failed in subdev\n");
- mutex_unlock(&common->lock);
-
return videobuf_streamoff(&common->buffer_queue);
}
@@ -1381,21 +1340,16 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct vpif_fh *fh = priv;
struct channel_obj *ch = fh->channel;
- struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
int ret = 0;
vpif_dbg(2, debug, "vpif_querystd\n");
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* Call querystd function of decoder device */
ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
querystd, std_id);
if (ret < 0)
vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
- mutex_unlock(&common->lock);
return ret;
}
@@ -1451,16 +1405,14 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
fh->initialized = 1;
/* Call encoder subdevice function to set the standard */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
ch->video.stdid = *std_id;
+ ch->video.dv_preset = V4L2_DV_INVALID;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
/* Get the information about the standard */
if (vpif_update_std_info(ch)) {
- ret = -EINVAL;
vpif_err("Error getting the standard info\n");
- goto s_std_exit;
+ return -EINVAL;
}
/* Configure the default format information */
@@ -1471,9 +1423,6 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
s_std, *std_id);
if (ret < 0)
vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
-
-s_std_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1567,9 +1516,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* first setup input path from sub device to vpif */
if (config->setup_input_path) {
ret = config->setup_input_path(ch->channel_id,
@@ -1578,7 +1524,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
vpif_dbg(1, debug, "couldn't setup input path for the"
" sub device %s, for input index %d\n",
subdev_info->name, index);
- goto exit;
+ return ret;
}
}
@@ -1589,7 +1535,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
input, output, 0);
if (ret < 0) {
vpif_dbg(1, debug, "Failed to set input\n");
- goto exit;
+ return ret;
}
}
vid_ch->input_idx = index;
@@ -1600,9 +1546,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
/* update tvnorms from the sub device input info */
ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
-
-exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1671,11 +1614,7 @@ static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
/* Fill in the information about format */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
*fmt = common->fmt;
- mutex_unlock(&common->lock);
return 0;
}
@@ -1694,7 +1633,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_pix_format *pixfmt;
int ret = 0;
- vpif_dbg(2, debug, "VIDIOC_S_FMT\n");
+ vpif_dbg(2, debug, "%s\n", __func__);
/* If streaming is started, return error */
if (common->started) {
@@ -1723,12 +1662,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
/* store the format in the channel object */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
common->fmt = *fmt;
- mutex_unlock(&common->lock);
-
return 0;
}
@@ -1807,6 +1741,306 @@ static int vpif_cropcap(struct file *file, void *priv,
return 0;
}
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_query_dv_preset() - QUERY_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_query_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, query_dv_preset, preset);
+}
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ int ret = 0;
+
+ if (common->started) {
+ vpif_dbg(1, debug, "streaming in progress\n");
+ return -EBUSY;
+ }
+
+ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+ (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+ if (!fh->initialized) {
+ vpif_dbg(1, debug, "Channel Busy\n");
+ return -EBUSY;
+ }
+ }
+
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
+ if (ret)
+ return ret;
+
+ fh->initialized = 1;
+
+ /* Call encoder subdevice function to set the standard */
+ if (mutex_lock_interruptible(&common->lock))
+ return -ERESTARTSYS;
+
+ ch->video.dv_preset = preset->preset;
+ ch->video.stdid = V4L2_STD_UNKNOWN;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+ /* Get the information about the standard */
+ if (vpif_update_std_info(ch)) {
+ vpif_dbg(1, debug, "Error getting the standard info\n");
+ ret = -EINVAL;
+ } else {
+ /* Configure the default format information */
+ vpif_config_format(ch);
+
+ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, s_dv_preset, preset);
+ }
+
+ mutex_unlock(&common->lock);
+
+ return ret;
+}
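From user space this handler sits behind VIDIOC_S_DV_PRESET. A rough sketch of the call sequence, not from the patch; the device path and preset choice are assumptions, and the preset must match one of the ch_params[] entries:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_preset preset;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical capture node */

	if (fd < 0)
		return 1;
	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;		/* must exist in ch_params[] */
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		perror("VIDIOC_S_DV_PRESET");
	else if (ioctl(fd, VIDIOC_G_DV_PRESET, &preset) == 0)
		printf("active preset: 0x%x\n", preset.preset);
	return 0;
}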
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ preset->preset = ch->video.dv_preset;
+
+ return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+ int ret;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD) {
+ vpif_dbg(2, debug, "Custom DV timings not supported by "
+ "subdevice\n");
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ *bt = timings->bt;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+ bt->hsync - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->vsize = bt->height * 2 +
+ bt->vfrontporch + bt->vsync + bt->vbackporch +
+ bt->il_vfrontporch + bt->il_vsync +
+ bt->il_vbackporch;
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->vsize = bt->height + bt->vfrontporch +
+ bt->vsync + bt->vbackporch;
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ std_info->dv_preset = V4L2_DV_INVALID;
+
+ vid_ch->stdid = 0;
+ vid_ch->dv_preset = V4L2_DV_INVALID;
+ return 0;
+}
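The arithmetic above is consistent with the fixed table at the top of vpif.c: plugging in the usual CEA-861 blanking values for 1280x720p60 (assumed here: hfp 110, hsync 40, hbp 220, vfp 5, vsync 5, vbp 20) reproduces the "720p60" entry (.eav2sav = 370 - 8, .l3 = 26, .l5 = 746, .vsize = 750). A small stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int hfp = 110, hsync = 40, hbp = 220;	/* assumed CEA-861 720p60 */
	unsigned int vfp = 5, vsync = 5, vbp = 20;
	unsigned int width = 1280, height = 720;

	unsigned int eav2sav = hbp + hfp + hsync - 8;	/* 362 == 370 - 8 */
	unsigned int sav2eav = width;			/* 1280 */
	unsigned int l3 = vsync + vbp + 1;		/* 26 */
	unsigned int vsize = height + vfp + vsync + vbp;	/* 750 */
	unsigned int l5 = vsize - (vfp - 1);		/* 746 */

	printf("eav2sav=%u sav2eav=%u l3=%u l5=%u vsize=%u\n",
	       eav2sav, sav2eav, l3, l5, vsize);
	return 0;
}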
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+ timings->bt = *bt;
+
+ return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+ vpif_dbg(2, debug, "match_type is invalid.\n");
+ return -EINVAL;
+ }
+
+ return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+ g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+ g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+ s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
/* vpif capture ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_querycap = vpif_querycap,
@@ -1829,6 +2063,18 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_streamon = vpif_streamon,
.vidioc_streamoff = vpif_streamoff,
.vidioc_cropcap = vpif_cropcap,
+ .vidioc_enum_dv_presets = vpif_enum_dv_presets,
+ .vidioc_s_dv_preset = vpif_s_dv_preset,
+ .vidioc_g_dv_preset = vpif_g_dv_preset,
+ .vidioc_query_dv_preset = vpif_query_dv_preset,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+ .vidioc_g_chip_ident = vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpif_dbg_g_register,
+ .vidioc_s_register = vpif_dbg_s_register,
+#endif
+ .vidioc_log_status = vpif_log_status,
};
/* vpif file operations */
@@ -1836,7 +2082,7 @@ static struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = vpif_open,
.release = vpif_release,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.mmap = vpif_mmap,
.poll = vpif_poll
};
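Together with the earlier hunk that hands &common->lock to videobuf_queue_dma_contig_init() and the next hunk that sets ch->video_dev->lock, switching to .unlocked_ioctl is what makes the per-handler mutex_lock_interruptible() calls removed throughout this patch unnecessary: the V4L2 core now takes the same mutex around every ioctl. A minimal sketch of that pattern, with made-up names:

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

/* All names below are illustrative, not part of this driver. */
struct my_dev {
	struct mutex lock;
	struct video_device vdev;
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

static int my_register(struct my_dev *dev)
{
	mutex_init(&dev->lock);
	dev->vdev.fops = &my_fops;
	dev->vdev.lock = &dev->lock;	/* the core serializes ioctls with this */
	return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
}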
@@ -1979,6 +2225,7 @@ static __init int vpif_probe(struct platform_device *pdev)
common = &(ch->common[VPIF_VIDEO_INDEX]);
spin_lock_init(&common->irqlock);
mutex_init(&common->lock);
+ ch->video_dev->lock = &common->lock;
/* Initialize prio member of channel object */
v4l2_prio_init(&ch->prio);
err = video_register_device(ch->video_dev,
@@ -2026,9 +2273,9 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF Capture driver"
- " initialized\n");
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "DM646x VPIF capture driver initialized\n");
return 0;
probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 4e12ec8cac6f..7a4196dfdce1 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -59,6 +59,8 @@ struct video_obj {
enum v4l2_field buf_field;
/* Currently selected or default standard */
v4l2_std_id stdid;
+ u32 dv_preset;
+ struct v4l2_bt_timings bt_timings;
/* This is to track the last input that is passed to application */
u32 input_idx;
};
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 412c65d54fe1..cdf659abdc2a 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -38,6 +38,7 @@
#include <media/adv7343.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include <mach/dm646x.h>
@@ -84,17 +85,6 @@ static struct vpif_config_params config_params = {
static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
-static const struct vpif_channel_config_params ch_params[] = {
- {
- "NTSC", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
- 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
- },
- {
- "PAL", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
- 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
- },
-};
-
/*
* vpif_uservirt_to_phys: This function is used to convert user
* space virtual address to physical address.
@@ -373,30 +363,54 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int vpif_get_std_info(struct channel_obj *ch)
+static int vpif_update_std_info(struct channel_obj *ch)
{
- struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct video_obj *vid_ch = &ch->video;
struct vpif_params *vpifparams = &ch->vpifparams;
struct vpif_channel_config_params *std_info = &vpifparams->std_info;
const struct vpif_channel_config_params *config;
- int index;
-
- std_info->stdid = vid_ch->stdid;
- if (!std_info->stdid)
- return -1;
+ int i;
- for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
- config = &ch_params[index];
- if (config->stdid & std_info->stdid) {
- memcpy(std_info, config, sizeof(*config));
- break;
+ for (i = 0; i < vpif_ch_params_count; i++) {
+ config = &ch_params[i];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (config->dv_preset == vid_ch->dv_preset) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
}
}
- if (index == ARRAY_SIZE(ch_params))
- return -1;
+ if (i == vpif_ch_params_count) {
+ vpif_dbg(1, debug, "Format not found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+ if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
+ return -EINVAL;
+
+ if (vid_ch->stdid || vid_ch->dv_preset) {
+ if (vpif_update_std_info(ch))
+ return -EINVAL;
+ }
common->fmt.fmt.pix.width = std_info->width;
common->fmt.fmt.pix.height = std_info->height;
@@ -404,8 +418,8 @@ static int vpif_get_std_info(struct channel_obj *ch)
common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
/* Set height and width parameters */
- ch->common[VPIF_VIDEO_INDEX].height = std_info->height;
- ch->common[VPIF_VIDEO_INDEX].width = std_info->width;
+ common->height = std_info->height;
+ common->width = std_info->width;
return 0;
}
@@ -516,10 +530,8 @@ static int vpif_check_format(struct channel_obj *ch,
else
sizeimage = config_params.channel_bufsize[ch->channel_id];
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
hpitch = pixfmt->bytesperline;
vpitch = sizeimage / (hpitch * 2);
@@ -568,7 +580,10 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct vpif_fh *fh = filep->private_data;
- struct common_obj *common = &fh->channel->common[VPIF_VIDEO_INDEX];
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+ vpif_dbg(2, debug, "vpif_mmap\n");
return videobuf_mmap_mapper(&common->buffer_queue, vma);
}
@@ -637,9 +652,6 @@ static int vpif_release(struct file *filep)
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* if this instance is doing IO */
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
@@ -662,8 +674,6 @@ static int vpif_release(struct file *filep)
config_params.numbuffers[ch->channel_id];
}
- mutex_unlock(&common->lock);
-
/* Decrement channel usrs counter */
atomic_dec(&ch->usrs);
/* If this file handle has initialize encoder device, reset it */
@@ -680,7 +690,12 @@ static int vpif_release(struct file *filep)
}
/* functions implementing ioctls */
-
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
static int vpif_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -722,17 +737,9 @@ static int vpif_g_fmt_vid_out(struct file *file, void *priv,
if (common->fmt.type != fmt->type)
return -EINVAL;
- /* Fill in the information about format */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
-
*fmt = common->fmt;
- mutex_unlock(&common->lock);
return 0;
}
@@ -773,12 +780,7 @@ static int vpif_s_fmt_vid_out(struct file *file, void *priv,
/* store the pix format in the channel object */
common->fmt.fmt.pix = *pixfmt;
/* store the format in the channel object */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
common->fmt = *fmt;
- mutex_unlock(&common->lock);
-
return 0;
}
@@ -808,7 +810,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct common_obj *common;
enum v4l2_field field;
u8 index = 0;
- int ret = 0;
/* This file handle has not initialized the channel,
It is not allowed to do settings */
@@ -826,18 +827,12 @@ static int vpif_reqbufs(struct file *file, void *priv,
index = VPIF_VIDEO_INDEX;
common = &ch->common[index];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
- if (common->fmt.type != reqbuf->type) {
- ret = -EINVAL;
- goto reqbuf_exit;
- }
+ if (common->fmt.type != reqbuf->type)
+ return -EINVAL;
- if (0 != common->io_usrs) {
- ret = -EBUSY;
- goto reqbuf_exit;
- }
+ if (0 != common->io_usrs)
+ return -EBUSY;
if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
@@ -854,7 +849,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
&common->irqlock,
reqbuf->type, field,
sizeof(struct videobuf_buffer), fh,
- NULL);
+ &common->lock);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -865,11 +860,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
INIT_LIST_HEAD(&common->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
- mutex_unlock(&common->lock);
- return ret;
+ return videobuf_reqbufs(&common->buffer_queue, reqbuf);
}
static int vpif_querybuf(struct file *file, void *priv,
@@ -990,22 +981,19 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
}
/* Call encoder subdevice function to set the standard */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
ch->video.stdid = *std_id;
+ ch->video.dv_preset = V4L2_DV_INVALID;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
/* Get the information about the standard */
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
if ((ch->vpifparams.std_info.width *
ch->vpifparams.std_info.height * 2) >
config_params.channel_bufsize[ch->channel_id]) {
vpif_err("invalid std for this size\n");
- ret = -EINVAL;
- goto s_std_exit;
+ return -EINVAL;
}
common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
@@ -1016,16 +1004,13 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
s_std_output, *std_id);
if (ret < 0) {
vpif_err("Failed to set output standard\n");
- goto s_std_exit;
+ return ret;
}
ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
s_std, *std_id);
if (ret < 0)
vpif_err("Failed to set standard for sub devices\n");
-
-s_std_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1090,21 +1075,17 @@ static int vpif_streamon(struct file *file, void *priv,
if (ret < 0)
return ret;
- /* Call videobuf_streamon to start streaming in videobuf */
+ /* Call videobuf_streamon to start streaming in videobuf */
ret = videobuf_streamon(&common->buffer_queue);
if (ret < 0) {
vpif_err("videobuf_streamon\n");
return ret;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* If buffer queue is empty, return error */
if (list_empty(&common->dma_queue)) {
vpif_err("buffer queue is empty\n");
- ret = -EIO;
- goto streamon_exit;
+ return -EIO;
}
/* Get the next frame from the buffer queue */
@@ -1130,8 +1111,7 @@ static int vpif_streamon(struct file *file, void *priv,
|| (!ch->vpifparams.std_info.frm_fmt
&& (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
vpif_err("conflict in field format and std format\n");
- ret = -EINVAL;
- goto streamon_exit;
+ return -EINVAL;
}
/* clock settings */
@@ -1140,13 +1120,13 @@ static int vpif_streamon(struct file *file, void *priv,
ch->vpifparams.std_info.hd_sd);
if (ret < 0) {
vpif_err("can't set clock\n");
- goto streamon_exit;
+ return ret;
}
/* set the parameters and addresses */
ret = vpif_set_video_params(vpif, ch->channel_id + 2);
if (ret < 0)
- goto streamon_exit;
+ return ret;
common->started = ret;
vpif_config_addr(ch, ret);
@@ -1171,9 +1151,6 @@ static int vpif_streamon(struct file *file, void *priv,
}
channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
}
-
-streamon_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1199,9 +1176,6 @@ static int vpif_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/* disable channel */
if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
@@ -1216,8 +1190,6 @@ static int vpif_streamoff(struct file *file, void *priv,
}
common->started = 0;
- mutex_unlock(&common->lock);
-
return videobuf_streamoff(&common->buffer_queue);
}
@@ -1264,13 +1236,9 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
int ret = 0;
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (common->started) {
vpif_err("Streaming in progress\n");
- ret = -EBUSY;
- goto s_output_exit;
+ return -EBUSY;
}
ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
@@ -1280,9 +1248,6 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
vpif_err("Failed to set output standard\n");
vid_ch->output_id = i;
-
-s_output_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1315,6 +1280,287 @@ static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
return v4l2_prio_change(&ch->prio, &fh->prio, p);
}
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ int ret = 0;
+
+ if (common->started) {
+ vpif_dbg(1, debug, "streaming in progress\n");
+ return -EBUSY;
+ }
+
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
+ if (ret != 0)
+ return ret;
+
+ fh->initialized = 1;
+
+ /* Call encoder subdevice function to set the standard */
+ if (mutex_lock_interruptible(&common->lock))
+ return -ERESTARTSYS;
+
+ ch->video.dv_preset = preset->preset;
+ ch->video.stdid = V4L2_STD_UNKNOWN;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+ /* Get the information about the standard */
+ if (vpif_update_resolution(ch)) {
+ ret = -EINVAL;
+ } else {
+ /* Configure the default format information */
+ vpif_config_format(ch);
+
+ ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, s_dv_preset, preset);
+ }
+
+ mutex_unlock(&common->lock);
+
+ return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ preset->preset = ch->video.dv_preset;
+
+ return 0;
+}
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+ int ret;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD) {
+ vpif_dbg(2, debug, "Custom DV timings not supported by "
+ "subdevice\n");
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ *bt = timings->bt;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+ bt->hsync - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->vsize = bt->height * 2 +
+ bt->vfrontporch + bt->vsync + bt->vbackporch +
+ bt->il_vfrontporch + bt->il_vsync +
+ bt->il_vbackporch;
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->vsize = bt->height + bt->vfrontporch +
+ bt->vsync + bt->vbackporch;
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120",
+ VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ std_info->dv_preset = V4L2_DV_INVALID;
+
+ vid_ch->stdid = 0;
+ vid_ch->dv_preset = V4L2_DV_INVALID;
+
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+ timings->bt = *bt;
+
+ return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+ vpif_dbg(2, debug, "match_type is invalid.\n");
+ return -EINVAL;
+ }
+
+ return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+ g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+ g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+ s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
/* vpif display ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_querycap = vpif_querycap,
@@ -1336,13 +1582,24 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_s_output = vpif_s_output,
.vidioc_g_output = vpif_g_output,
.vidioc_cropcap = vpif_cropcap,
+ .vidioc_enum_dv_presets = vpif_enum_dv_presets,
+ .vidioc_s_dv_preset = vpif_s_dv_preset,
+ .vidioc_g_dv_preset = vpif_g_dv_preset,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+ .vidioc_g_chip_ident = vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpif_dbg_g_register,
+ .vidioc_s_register = vpif_dbg_s_register,
+#endif
+ .vidioc_log_status = vpif_log_status,
};
static const struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = vpif_open,
.release = vpif_release,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.mmap = vpif_mmap,
.poll = vpif_poll
};
@@ -1526,6 +1783,7 @@ static __init int vpif_probe(struct platform_device *pdev)
v4l2_prio_init(&ch->prio);
ch->common[VPIF_VIDEO_INDEX].fmt.type =
V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ch->video_dev->lock = &common->lock;
/* register video device */
vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1565,6 +1823,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.sd[i]->grp_id = 1 << i;
}
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "DM646x VPIF display driver initialized\n");
return 0;
probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index a2a7cd166bbf..b53aaa883075 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -67,6 +67,8 @@ struct video_obj {
* most recent displayed frame only */
v4l2_std_id stdid; /* Currently selected or default
* standard */
+ u32 dv_preset;
+ struct v4l2_bt_timings bt_timings;
u32 output_id; /* Current output id */
};
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 099d5df8c572..87f77a34eeab 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -33,6 +33,7 @@
#include <media/saa7115.h>
#include <media/tvp5150.h>
#include <media/tvaudio.h>
+#include <media/mt9v011.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
@@ -1917,11 +1918,6 @@ static unsigned short tvp5150_addrs[] = {
I2C_CLIENT_END
};
-static unsigned short mt9v011_addrs[] = {
- 0xba >> 1,
- I2C_CLIENT_END
-};
-
static unsigned short msp3400_addrs[] = {
0x80 >> 1,
0x88 >> 1,
@@ -2437,6 +2433,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+ break;
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
@@ -2623,11 +2620,17 @@ void em28xx_card_setup(struct em28xx *dev)
"tvp5150", 0, tvp5150_addrs);
if (dev->em28xx_sensor == EM28XX_MT9V011) {
+ struct mt9v011_platform_data pdata;
+ struct i2c_board_info mt9v011_info = {
+ .type = "mt9v011",
+ .addr = 0xba >> 1,
+ .platform_data = &pdata,
+ };
struct v4l2_subdev *sd;
- sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
- v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
+ pdata.xtal = dev->sensor_xtal;
+ sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+ &mt9v011_info, NULL);
}
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index cc77d144df3c..bf66189cb26d 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -59,31 +59,7 @@
/*****************************************************************************/
static const struct usb_device_id et61x251_id_table[] = {
- { USB_DEVICE(0x102c, 0x6151), },
{ USB_DEVICE(0x102c, 0x6251), },
- { USB_DEVICE(0x102c, 0x6253), },
- { USB_DEVICE(0x102c, 0x6254), },
- { USB_DEVICE(0x102c, 0x6255), },
- { USB_DEVICE(0x102c, 0x6256), },
- { USB_DEVICE(0x102c, 0x6257), },
- { USB_DEVICE(0x102c, 0x6258), },
- { USB_DEVICE(0x102c, 0x6259), },
- { USB_DEVICE(0x102c, 0x625a), },
- { USB_DEVICE(0x102c, 0x625b), },
- { USB_DEVICE(0x102c, 0x625c), },
- { USB_DEVICE(0x102c, 0x625d), },
- { USB_DEVICE(0x102c, 0x625e), },
- { USB_DEVICE(0x102c, 0x625f), },
- { USB_DEVICE(0x102c, 0x6260), },
- { USB_DEVICE(0x102c, 0x6261), },
- { USB_DEVICE(0x102c, 0x6262), },
- { USB_DEVICE(0x102c, 0x6263), },
- { USB_DEVICE(0x102c, 0x6264), },
- { USB_DEVICE(0x102c, 0x6265), },
- { USB_DEVICE(0x102c, 0x6266), },
- { USB_DEVICE(0x102c, 0x6267), },
- { USB_DEVICE(0x102c, 0x6268), },
- { USB_DEVICE(0x102c, 0x6269), },
{ }
};
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 629043933501..a09c4709d613 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -276,7 +276,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04a5, 0x3035)},
{}
};
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 1eacb6c7926d..8b398493f96b 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1040,14 +1040,14 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0572, 0x0041)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index c1ae05f4661f..4bf2cab98d64 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2088,7 +2088,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0553, 0x0002)},
{USB_DEVICE(0x0813, 0x0001)},
{}
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index a594b36d6199..4b2c483fce6f 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index d78226455d1f..987b4b69d7ab 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -229,7 +229,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04cb, 0x0104)},
{USB_DEVICE(0x04cb, 0x0109)},
{USB_DEVICE(0x04cb, 0x010b)},
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index b05bec7321b5..99083038cec3 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -488,7 +488,7 @@ static void sd_callback(struct gspca_dev *gspca_dev)
/*=================== USB driver structure initialisation ==================*/
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05e3, 0x0503)},
{USB_DEVICE(0x05e3, 0xf191)},
{}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 442970073e8a..f21f2a258ae0 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 11, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 12, 0)
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -508,8 +508,8 @@ static int gspca_is_compressed(__u32 format)
return 0;
}
-static int frame_alloc(struct gspca_dev *gspca_dev,
- unsigned int count)
+static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory, unsigned int count)
{
struct gspca_frame *frame;
unsigned int frsz;
@@ -519,7 +519,6 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
frsz = gspca_dev->cam.cam_mode[i].sizeimage;
PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
frsz = PAGE_ALIGN(frsz);
- gspca_dev->frsz = frsz;
if (count >= GSPCA_MAX_FRAMES)
count = GSPCA_MAX_FRAMES - 1;
gspca_dev->frbuf = vmalloc_32(frsz * count);
@@ -527,6 +526,9 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
err("frame alloc failed");
return -ENOMEM;
}
+ gspca_dev->capt_file = file;
+ gspca_dev->memory = memory;
+ gspca_dev->frsz = frsz;
gspca_dev->nframes = count;
for (i = 0; i < count; i++) {
frame = &gspca_dev->frame[i];
@@ -535,7 +537,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
frame->v4l2_buf.flags = 0;
frame->v4l2_buf.field = V4L2_FIELD_NONE;
frame->v4l2_buf.length = frsz;
- frame->v4l2_buf.memory = gspca_dev->memory;
+ frame->v4l2_buf.memory = memory;
frame->v4l2_buf.sequence = 0;
frame->data = gspca_dev->frbuf + i * frsz;
frame->v4l2_buf.m.offset = i * frsz;
@@ -558,6 +560,9 @@ static void frame_free(struct gspca_dev *gspca_dev)
gspca_dev->frame[i].data = NULL;
}
gspca_dev->nframes = 0;
+ gspca_dev->frsz = 0;
+ gspca_dev->capt_file = NULL;
+ gspca_dev->memory = GSPCA_MEMORY_NO;
}
static void destroy_urbs(struct gspca_dev *gspca_dev)
@@ -1210,29 +1215,15 @@ static void gspca_release(struct video_device *vfd)
static int dev_open(struct file *file)
{
struct gspca_dev *gspca_dev;
- int ret;
PDEBUG(D_STREAM, "[%s] open", current->comm);
gspca_dev = (struct gspca_dev *) video_devdata(file);
- if (mutex_lock_interruptible(&gspca_dev->queue_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
-
- if (gspca_dev->users > 4) { /* (arbitrary value) */
- ret = -EBUSY;
- goto out;
- }
+ if (!gspca_dev->present)
+ return -ENODEV;
/* protect the subdriver against rmmod */
- if (!try_module_get(gspca_dev->module)) {
- ret = -ENODEV;
- goto out;
- }
-
- gspca_dev->users++;
+ if (!try_module_get(gspca_dev->module))
+ return -ENODEV;
file->private_data = gspca_dev;
#ifdef GSPCA_DEBUG
@@ -1244,14 +1235,7 @@ static int dev_open(struct file *file)
gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
| V4L2_DEBUG_IOCTL_ARG);
#endif
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->queue_lock);
- if (ret != 0)
- PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
- else
- PDEBUG(D_STREAM, "open done");
- return ret;
+ return 0;
}
static int dev_close(struct file *file)
@@ -1261,7 +1245,6 @@ static int dev_close(struct file *file)
PDEBUG(D_STREAM, "[%s] close", current->comm);
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
- gspca_dev->users--;
/* if the file did the capture, free the streaming resources */
if (gspca_dev->capt_file == file) {
@@ -1272,8 +1255,6 @@ static int dev_close(struct file *file)
mutex_unlock(&gspca_dev->usb_lock);
}
frame_free(gspca_dev);
- gspca_dev->capt_file = NULL;
- gspca_dev->memory = GSPCA_MEMORY_NO;
}
file->private_data = NULL;
module_put(gspca_dev->module);
@@ -1516,6 +1497,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -ERESTARTSYS;
if (gspca_dev->memory != GSPCA_MEMORY_NO
+ && gspca_dev->memory != GSPCA_MEMORY_READ
&& gspca_dev->memory != rb->memory) {
ret = -EBUSY;
goto out;
@@ -1544,19 +1526,18 @@ static int vidioc_reqbufs(struct file *file, void *priv,
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
+ /* Don't restart the stream when switching from read to mmap mode */
+ if (gspca_dev->memory == GSPCA_MEMORY_READ)
+ streaming = 0;
/* free the previous allocated buffers, if any */
- if (gspca_dev->nframes != 0) {
+ if (gspca_dev->nframes != 0)
frame_free(gspca_dev);
- gspca_dev->capt_file = NULL;
- }
if (rb->count == 0) /* unrequest */
goto out;
- gspca_dev->memory = rb->memory;
- ret = frame_alloc(gspca_dev, rb->count);
+ ret = frame_alloc(gspca_dev, file, rb->memory, rb->count);
if (ret == 0) {
rb->count = gspca_dev->nframes;
- gspca_dev->capt_file = file;
if (streaming)
ret = gspca_init_transfer(gspca_dev);
}
@@ -1630,11 +1611,15 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (!gspca_dev->streaming)
- return 0;
+
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->streaming) {
+ ret = 0;
+ goto out;
+ }
+
/* check the capture file */
if (gspca_dev->capt_file != file) {
ret = -EBUSY;
@@ -1649,6 +1634,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
+ /* In case another thread is waiting in dqbuf */
+ wake_up_interruptible(&gspca_dev->wq);
/* empty the transfer queues */
atomic_set(&gspca_dev->fr_q, 0);
@@ -1827,33 +1814,77 @@ out:
return ret;
}
+static int frame_ready_nolock(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory)
+{
+ if (!gspca_dev->present)
+ return -ENODEV;
+ if (gspca_dev->capt_file != file || gspca_dev->memory != memory ||
+ !gspca_dev->streaming)
+ return -EINVAL;
+
+ /* check if a frame is ready */
+ return gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i);
+}
+
+static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
+ ret = frame_ready_nolock(gspca_dev, file, memory);
+ mutex_unlock(&gspca_dev->queue_lock);
+ return ret;
+}
+
/*
- * wait for a video frame
+ * dequeue a video buffer
*
- * If a frame is ready, its index is returned.
+ * If O_NONBLOCK is not set, block until a buffer is available.
*/
-static int frame_wait(struct gspca_dev *gspca_dev,
- int nonblock_ing)
+static int vidioc_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *v4l2_buf)
{
- int i, ret;
+ struct gspca_dev *gspca_dev = priv;
+ struct gspca_frame *frame;
+ int i, j, ret;
- /* check if a frame is ready */
- i = gspca_dev->fr_o;
- if (i == atomic_read(&gspca_dev->fr_i)) {
- if (nonblock_ing)
+ PDEBUG(D_FRAM, "dqbuf");
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
+
+ for (;;) {
+ ret = frame_ready_nolock(gspca_dev, file, v4l2_buf->memory);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ break;
+
+ mutex_unlock(&gspca_dev->queue_lock);
+
+ if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
/* wait till a frame is ready */
ret = wait_event_interruptible_timeout(gspca_dev->wq,
- i != atomic_read(&gspca_dev->fr_i) ||
- !gspca_dev->streaming || !gspca_dev->present,
+ frame_ready(gspca_dev, file, v4l2_buf->memory),
msecs_to_jiffies(3000));
if (ret < 0)
return ret;
- if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present)
+ if (ret == 0)
return -EIO;
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
}
+ i = gspca_dev->fr_o;
+ j = gspca_dev->fr_queue[i];
+ frame = &gspca_dev->frame[j];
+
gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
if (gspca_dev->sd_desc->dq_callback) {
@@ -1863,46 +1894,12 @@ static int frame_wait(struct gspca_dev *gspca_dev,
gspca_dev->sd_desc->dq_callback(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
- return gspca_dev->fr_queue[i];
-}
-
-/*
- * dequeue a video buffer
- *
- * If nonblock_ing is false, block until a buffer is available.
- */
-static int vidioc_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *v4l2_buf)
-{
- struct gspca_dev *gspca_dev = priv;
- struct gspca_frame *frame;
- int i, ret;
-
- PDEBUG(D_FRAM, "dqbuf");
- if (v4l2_buf->memory != gspca_dev->memory)
- return -EINVAL;
-
- if (!gspca_dev->present)
- return -ENODEV;
-
- /* if not streaming, be sure the application will not loop forever */
- if (!(file->f_flags & O_NONBLOCK)
- && !gspca_dev->streaming && gspca_dev->users == 1)
- return -EINVAL;
- /* only the capturing file may dequeue */
- if (gspca_dev->capt_file != file)
- return -EINVAL;
-
- /* only one dequeue / read at a time */
- if (mutex_lock_interruptible(&gspca_dev->read_lock))
- return -ERESTARTSYS;
+ frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
+ memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
+ PDEBUG(D_FRAM, "dqbuf %d", j);
+ ret = 0;
- ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK);
- if (ret < 0)
- goto out;
- i = ret; /* frame index */
- frame = &gspca_dev->frame[i];
if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
frame->data,
@@ -1910,15 +1907,10 @@ static int vidioc_dqbuf(struct file *file, void *priv,
PDEBUG(D_ERR|D_STREAM,
"dqbuf cp to user failed");
ret = -EFAULT;
- goto out;
}
}
- frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
- memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
- PDEBUG(D_FRAM, "dqbuf %d", i);
- ret = 0;
out:
- mutex_unlock(&gspca_dev->read_lock);
+ mutex_unlock(&gspca_dev->queue_lock);
return ret;
}
@@ -2033,9 +2025,7 @@ static unsigned int dev_poll(struct file *file, poll_table *wait)
poll_wait(file, &gspca_dev->wq, wait);
/* if reqbufs is not done, the user would use read() */
- if (gspca_dev->nframes == 0) {
- if (gspca_dev->memory != GSPCA_MEMORY_NO)
- return POLLERR; /* not the 1st time */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) {
ret = read_alloc(gspca_dev, file);
if (ret != 0)
return POLLERR;
@@ -2067,18 +2057,10 @@ static ssize_t dev_read(struct file *file, char __user *data,
PDEBUG(D_FRAM, "read (%zd)", count);
if (!gspca_dev->present)
return -ENODEV;
- switch (gspca_dev->memory) {
- case GSPCA_MEMORY_NO: /* first time */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
ret = read_alloc(gspca_dev, file);
if (ret != 0)
return ret;
- break;
- case GSPCA_MEMORY_READ:
- if (gspca_dev->capt_file == file)
- break;
- /* fall thru */
- default:
- return -EINVAL;
}
/* get a frame */
@@ -2266,7 +2248,6 @@ int gspca_dev_probe2(struct usb_interface *intf,
goto out;
mutex_init(&gspca_dev->usb_lock);
- mutex_init(&gspca_dev->read_lock);
mutex_init(&gspca_dev->queue_lock);
init_waitqueue_head(&gspca_dev->wq);
@@ -2341,12 +2322,11 @@ void gspca_disconnect(struct usb_interface *intf)
PDEBUG(D_PROBE, "%s disconnect",
video_device_node_name(&gspca_dev->vdev));
mutex_lock(&gspca_dev->usb_lock);
+
gspca_dev->present = 0;
+ wake_up_interruptible(&gspca_dev->wq);
- if (gspca_dev->streaming) {
- destroy_urbs(gspca_dev);
- wake_up_interruptible(&gspca_dev->wq);
- }
+ destroy_urbs(gspca_dev);
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
gspca_input_destroy_urb(gspca_dev);
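
For reference, a minimal sketch of the producer/consumer convention that frame_ready_nolock() and the new dqbuf loop rely on: a frame is ready whenever the read index fr_o differs from the fill index fr_i, the fill side (the URB completion path, which is not part of this hunk and is assumed here) advances fr_i and wakes the wait queue, and the consumer advances fr_o under queue_lock. Names below are illustrative, not the driver's API:

/* illustrative sketch, not gspca code; assumes init_waitqueue_head() was run */
#include <linux/atomic.h>
#include <linux/wait.h>

#define NFRAMES 16

struct frame_ring {
        atomic_t fr_i;                  /* next slot the producer fills */
        int fr_o;                       /* next slot the consumer reads */
        int queue[NFRAMES];
        wait_queue_head_t wq;
};

/* producer side, e.g. run on URB completion (assumed, not in this hunk) */
static void ring_push(struct frame_ring *r, int frame_index)
{
        int i = atomic_read(&r->fr_i);

        r->queue[i] = frame_index;
        atomic_set(&r->fr_i, (i + 1) % NFRAMES);
        wake_up_interruptible(&r->wq);  /* unblocks dqbuf/poll/read */
}

/* consumer side: mirrors frame_ready_nolock() plus the dequeue in dqbuf;
 * the caller is expected to hold the queue lock */
static int ring_pop(struct frame_ring *r)
{
        int i = r->fr_o;

        if (i == atomic_read(&r->fr_i))
                return -1;              /* nothing ready */
        r->fr_o = (i + 1) % NFRAMES;
        return r->queue[i];
}
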
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 97b77a26a2eb..41755226d389 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -205,14 +205,12 @@ struct gspca_dev {
wait_queue_head_t wq; /* wait queue */
struct mutex usb_lock; /* usb exchange protection */
- struct mutex read_lock; /* read protection */
struct mutex queue_lock; /* ISOC queue protection */
int usb_err; /* USB error - protected by usb_lock */
u16 pkt_size; /* ISOC packet size */
#ifdef CONFIG_PM
char frozen; /* suspend - resume */
#endif
- char users; /* number of opens */
char present; /* device connected */
char nbufread; /* number of buffers for read() */
char memory; /* memory type (V4L2_MEMORY_xxx) */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index a35e87bb0388..06b777f5379e 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -314,7 +314,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0979, 0x0280)},
{}
};
diff --git a/drivers/media/video/gspca/jpeg.h b/drivers/media/video/gspca/jpeg.h
index de63c36806c0..ab54910418b4 100644
--- a/drivers/media/video/gspca/jpeg.h
+++ b/drivers/media/video/gspca/jpeg.h
@@ -141,9 +141,9 @@ static void jpeg_define(u8 *jpeg_hdr,
memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
#ifndef CONEX_CAM
jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
- jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height & 0xff;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
- jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width & 0xff;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY;
#endif
}
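
The dropped "& 0xff" masks are redundant rather than a behaviour change: jpeg_hdr is an array of bytes (u8), and storing an int into a u8 already keeps only the low 8 bits. A standalone check of that conversion rule:

#include <stdio.h>

int main(void)
{
        unsigned char hdr[2];
        int height = 480;               /* 0x01e0 */

        hdr[0] = height >> 8;           /* 0x01 */
        hdr[1] = height;                /* 0xe0, same as (height & 0xff) */
        printf("%02x %02x\n", hdr[0], hdr[1]);
        return 0;
}
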
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index d2ce65dcbfdc..5964691c0e95 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -607,7 +607,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
{}
};
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index c872b93a3351..a7722b1aef9b 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -28,7 +28,7 @@ int force_sensor;
static int dump_bridge;
int dump_sensor;
-static const __devinitdata struct usb_device_id m5602_table[] = {
+static const struct usb_device_id m5602_table[] = {
{USB_DEVICE(0x0402, 0x5602)},
{}
};
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index a81536e78698..cb4d0bf0d784 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -490,7 +490,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x050f)},
{}
};
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 7607a288b51c..3884c9d300c5 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1229,7 +1229,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x08ca, 0x0110)}, /* Trust Spyc@m 100 */
{USB_DEVICE(0x08ca, 0x0111)}, /* Aiptek Pencam VGA+ */
{USB_DEVICE(0x093a, 0x010f)}, /* All other known MR97310A VGA cams */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index e1c3b9328ace..8ab2c452c25e 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -488,7 +488,6 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
#define R511_SNAP_PXDIV 0x1c
#define R511_SNAP_LNDIV 0x1d
#define R511_SNAP_UV_EN 0x1e
-#define R511_SNAP_UV_EN 0x1e
#define R511_SNAP_OPTS 0x1f
#define R511_DRAM_FLOW_CTL 0x20
@@ -1847,8 +1846,7 @@ static const struct ov_i2c_regvals norm_7670[] = {
{ 0x6c, 0x0a },
{ 0x6d, 0x55 },
{ 0x6e, 0x11 },
- { 0x6f, 0x9f },
- /* "9e for advance AWB" */
+ { 0x6f, 0x9f }, /* "9e for advance AWB" */
{ 0x6a, 0x40 },
{ OV7670_R01_BLUE, 0x40 },
{ OV7670_R02_RED, 0x60 },
@@ -3054,7 +3052,7 @@ static void ov519_configure(struct sd *sd)
{
static const struct ov_regvals init_519[] = {
{ 0x5a, 0x6d }, /* EnableSystem */
- { 0x53, 0x9b },
+ { 0x53, 0x9b }, /* don't enable the microcontroller */
{ OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
{ 0x5d, 0x03 },
{ 0x49, 0x01 },
@@ -4747,7 +4745,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0edf93973b1c..04da22802736 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -479,15 +479,20 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
struct usb_device *udev = gspca_dev->dev;
int ret;
- PDEBUG(D_USBO, "reg=0x%04x, val=0%02x", reg, val);
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val);
gspca_dev->usb_buf[0] = val;
ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
err("write failed %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -495,14 +500,18 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
struct usb_device *udev = gspca_dev->dev;
int ret;
+ if (gspca_dev->usb_err < 0)
+ return 0;
ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
0x01,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
- PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
- if (ret < 0)
+ PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
+ if (ret < 0) {
err("read failed %d", ret);
+ gspca_dev->usb_err = ret;
+ }
return gspca_dev->usb_buf[0];
}
@@ -558,13 +567,15 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
- PDEBUG(D_USBO, "reg: 0x%02x, val: 0x%02x", reg, val);
+ PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val);
ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
- if (!sccb_check_status(gspca_dev))
+ if (!sccb_check_status(gspca_dev)) {
err("sccb_reg_write failed");
+ gspca_dev->usb_err = -EIO;
+ }
}
static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -885,7 +896,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ov534_set_led(gspca_dev, 0);
set_frame_rate(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
@@ -920,7 +931,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1289,7 +1300,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x1415, 0x2000)},
{}
};
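
The ov534 changes adopt the gspca usb_err convention: the first failing control transfer is latched in gspca_dev->usb_err, later register accesses become no-ops, and sd_init()/sd_start() simply return the latched value. A minimal sketch of that error-latching pattern; do_control_write() is a hypothetical stand-in for the usb_control_msg() call, not a real API:

/* sketch only; do_control_write() is hypothetical */
static int do_control_write(unsigned reg, unsigned val);

struct dev_state {
        int usb_err;                    /* 0, or the first USB error seen */
};

static void reg_write(struct dev_state *s, unsigned reg, unsigned val)
{
        int ret;

        if (s->usb_err < 0)             /* an earlier transfer failed: skip */
                return;
        ret = do_control_write(reg, val);
        if (ret < 0)
                s->usb_err = ret;       /* latch only the first error */
}

static int dev_start(struct dev_state *s)
{
        reg_write(s, 0xe0, 0x00);       /* long register sequences stay */
        reg_write(s, 0xe1, 0x01);       /* linear and unchecked          */
        return s->usb_err;              /* a single check at the end */
}
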
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index c5244b4b4777..aaf5428c57f5 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1429,7 +1429,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06f8, 0x3003)},
{}
};
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 96f9986305b4..81739a2f205e 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -530,7 +530,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4028)},
{USB_DEVICE(0x093a, 0x2460)},
{USB_DEVICE(0x093a, 0x2461)},
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2700975abce5..5615d7bd8304 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1184,7 +1184,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06f8, 0x3009)},
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
@@ -1201,7 +1201,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 6820f5d58b19..f8801b50e64f 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -837,7 +837,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2600)},
{USB_DEVICE(0x093a, 0x2601)},
{USB_DEVICE(0x093a, 0x2603)},
@@ -849,7 +849,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 40a06680502d..4271f86dfe01 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -703,7 +703,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
/* The Genius Smart is untested. I can't find an owner ! */
/* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index cb08d00d0a31..fcf29897b713 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2470,7 +2470,7 @@ static const struct sd_desc sd_desc = {
| (SENSOR_ ## sensor << 8) \
| (i2c_addr)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 73504a3f87b7..c6cd68d66b53 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -23,8 +23,15 @@
/* Some documentation on known sonixb registers:
Reg Use
+sn9c101 / sn9c102:
0x10 high nibble red gain low nibble blue gain
0x11 low nibble green gain
+sn9c103:
+0x05 red gain 0-127
+0x06 blue gain 0-127
+0x07 green gain 0-127
+all:
+0x08-0x0f i2c / 3wire registers
0x12 hstart
0x13 vstart
0x15 hsize (hsize = register-value * 16)
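
A quick check of the hsize/vsize encoding documented above (register value * 16), using the two geometries the driver itself mentions later in sd_start() ("0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288"):

#include <stdio.h>

int main(void)
{
        printf("%d x %d\n", 0x28 * 16, 0x1e * 16);      /* 640 x 480 */
        printf("%d x %d\n", 0x16 * 16, 0x12 * 16);      /* 352 x 288 */
        return 0;
}
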
@@ -88,12 +95,9 @@ struct sd {
typedef const __u8 sensor_init_t[8];
struct sensor_data {
- const __u8 *bridge_init[2];
- int bridge_init_size[2];
+ const __u8 *bridge_init;
sensor_init_t *sensor_init;
int sensor_init_size;
- sensor_init_t *sensor_bridge_init[2];
- int sensor_bridge_init_size[2];
int flags;
unsigned ctrl_dis;
__u8 sensor_addr;
@@ -114,7 +118,6 @@ struct sensor_data {
#define NO_FREQ (1 << FREQ_IDX)
#define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
-#define COMP2 0x8f
#define COMP 0xc7 /* 0x87 //0x07 */
#define COMP1 0xc9 /* 0x89 //0x09 */
@@ -123,15 +126,11 @@ struct sensor_data {
#define SYS_CLK 0x04
-#define SENS(bridge_1, bridge_3, sensor, sensor_1, \
- sensor_3, _flags, _ctrl_dis, _sensor_addr) \
+#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
{ \
- .bridge_init = { bridge_1, bridge_3 }, \
- .bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
+ .bridge_init = bridge, \
.sensor_init = sensor, \
.sensor_init_size = sizeof(sensor), \
- .sensor_bridge_init = { sensor_1, sensor_3,}, \
- .sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
.flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
}
@@ -311,7 +310,6 @@ static const __u8 initHv7131d[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
0x28, 0x1e, 0x60, 0x8e, 0x42,
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
};
static const __u8 hv7131d_sensor_init[][8] = {
{0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
@@ -326,7 +324,6 @@ static const __u8 initHv7131r[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
0x28, 0x1e, 0x60, 0x8a, 0x20,
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
};
static const __u8 hv7131r_sensor_init[][8] = {
{0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
@@ -339,7 +336,7 @@ static const __u8 initOv6650[] = {
0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
- 0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
+ 0x10,
};
static const __u8 ov6650_sensor_init[][8] = {
/* Bright, contrast, etc are set through SCBB interface.
@@ -378,24 +375,13 @@ static const __u8 initOv7630[] = {
0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */
0x28, 0x1e, /* H & V sizes r15 .. r16 */
- 0x68, COMP2, MCK_INIT1, /* r17 .. r19 */
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */
-};
-static const __u8 initOv7630_3[] = {
- 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */
- 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
- 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */
- 0x28, 0x1e, /* H & V sizes r15 .. r16 */
0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00, /* r1a .. r20 */
- 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, /* r21 .. r28 */
- 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff /* r29 .. r30 */
};
static const __u8 ov7630_sensor_init[][8] = {
{0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
/* {0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10}, jfm */
- {0xd0, 0x21, 0x12, 0x1c, 0x00, 0x80, 0x34, 0x10}, /* jfm */
+ {0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10}, /* jfm */
{0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
{0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
{0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
@@ -413,16 +399,11 @@ static const __u8 ov7630_sensor_init[][8] = {
{0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
};
-static const __u8 ov7630_sensor_init_3[][8] = {
- {0xa0, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
-};
-
static const __u8 initPas106[] = {
0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
0x16, 0x12, 0x24, COMP1, MCK_INIT1,
- 0x18, 0x10, 0x02, 0x02, 0x09, 0x07
};
/* compression 0x86 mckinit1 0x2b */
@@ -496,7 +477,6 @@ static const __u8 initPas202[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
0x28, 0x1e, 0x20, 0x89, 0x20,
- 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
};
/* "Known" PAS202BCB registers:
@@ -537,7 +517,6 @@ static const __u8 initTas5110c[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x45, 0x09, 0x0a,
0x16, 0x12, 0x60, 0x86, 0x2b,
- 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
};
/* Same as above, except a different hstart */
static const __u8 initTas5110d[] = {
@@ -545,12 +524,19 @@ static const __u8 initTas5110d[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x41, 0x09, 0x0a,
0x16, 0x12, 0x60, 0x86, 0x2b,
- 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
};
-static const __u8 tas5110_sensor_init[][8] = {
+/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
+static const __u8 tas5110c_sensor_init[][8] = {
{0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
{0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
- {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17},
+};
+/* Known TAS5110D registers
+ * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
+ * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
+ * Note: writing reg03 seems to only work when written together with 02
+ */
+static const __u8 tas5110d_sensor_init[][8] = {
+ {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
};
static const __u8 initTas5130[] = {
@@ -558,7 +544,6 @@ static const __u8 initTas5130[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a,
0x28, 0x1e, 0x60, COMP, MCK_INIT,
- 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
};
static const __u8 tas5130_sensor_init[][8] = {
/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
@@ -569,21 +554,18 @@ static const __u8 tas5130_sensor_init[][8] = {
};
static struct sensor_data sensor_data[] = {
-SENS(initHv7131d, NULL, hv7131d_sensor_init, NULL, NULL, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initHv7131r, NULL, hv7131r_sensor_init, NULL, NULL, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
-SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60),
-SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3,
- F_GAIN, 0, 0x21),
-SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ,
- 0),
-SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN,
- NO_FREQ, 0),
-SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL,
- F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL,
- F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
- 0),
+SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
+SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
+SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
+SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
+SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
+SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+ NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+ NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5130, tas5130_sensor_init, F_GAIN,
+ NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
};
/* get one byte in gspca_dev->usb_buf */
@@ -655,7 +637,6 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- __u8 value;
switch (sd->sensor) {
case SENSOR_OV6650:
@@ -697,17 +678,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
goto err;
break;
}
- case SENSOR_TAS5130CXX: {
- __u8 i2c[] =
- {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
-
- value = 0xff - sd->brightness;
- i2c[4] = value;
- PDEBUG(D_CONF, "brightness %d : %d", value, i2c[4]);
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
- break;
- }
}
return;
err:
@@ -733,7 +703,7 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
break;
}
case SENSOR_TAS5110C:
- case SENSOR_TAS5110D: {
+ case SENSOR_TAS5130CXX: {
__u8 i2c[] =
{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
@@ -742,6 +712,23 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
goto err;
break;
}
+ case SENSOR_TAS5110D: {
+ __u8 i2c[] = {
+ 0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
+ gain = 255 - gain;
+ /* The bits in the register are the wrong way around!! */
+ i2c[3] |= (gain & 0x80) >> 7;
+ i2c[3] |= (gain & 0x40) >> 5;
+ i2c[3] |= (gain & 0x20) >> 3;
+ i2c[3] |= (gain & 0x10) >> 1;
+ i2c[3] |= (gain & 0x08) << 1;
+ i2c[3] |= (gain & 0x04) << 3;
+ i2c[3] |= (gain & 0x02) << 5;
+ i2c[3] |= (gain & 0x01) << 7;
+ if (i2c_w(gspca_dev, i2c) < 0)
+ goto err;
+ break;
+ }
case SENSOR_OV6650:
gain >>= 1;
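
The shift cascade in the TAS5110D case is an 8-bit bit reversal (bit 7 of the gain lands in bit 0 of the register and so on), which is what the "wrong way around" comment refers to. An equivalent, user-space-testable form of the same operation is below; the kernel's bitrev8() from <linux/bitrev.h> could express it more compactly, though the open-coded OR chain avoids pulling in another header for a single byte:

#include <assert.h>
#include <stdint.h>

static uint8_t bitrev8_sketch(uint8_t v)
{
        uint8_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (v & (1 << i))
                        r |= 0x80 >> i; /* bit i moves to bit (7 - i) */
        return r;
}

int main(void)
{
        /* reversing twice must return the original value */
        assert(bitrev8_sketch(bitrev8_sketch(0xb5)) == 0xb5);
        /* the driver applies this to (255 - gain) before writing i2c[3] */
        return 0;
}
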
@@ -796,7 +783,7 @@ static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 gain;
- __u8 buf[2] = { 0, 0 };
+ __u8 buf[3] = { 0, 0, 0 };
if (sensor_data[sd->sensor].flags & F_GAIN) {
/* Use the sensor gain to do the actual gain */
@@ -804,13 +791,18 @@ static void setgain(struct gspca_dev *gspca_dev)
return;
}
- gain = sd->gain >> 4;
-
- /* red and blue gain */
- buf[0] = gain << 4 | gain;
- /* green gain */
- buf[1] = gain;
- reg_w(gspca_dev, 0x10, buf, 2);
+ if (sd->bridge == BRIDGE_103) {
+ gain = sd->gain >> 1;
+ buf[0] = gain; /* Red */
+ buf[1] = gain; /* Green */
+ buf[2] = gain; /* Blue */
+ reg_w(gspca_dev, 0x05, buf, 3);
+ } else {
+ gain = sd->gain >> 4;
+ buf[0] = gain << 4 | gain; /* Red and blue */
+ buf[1] = gain; /* Green */
+ reg_w(gspca_dev, 0x10, buf, 2);
+ }
}
static void setexposure(struct gspca_dev *gspca_dev)
@@ -1049,7 +1041,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
desired_avg_lum = 5000;
} else {
deadzone = 1500;
- desired_avg_lum = 18000;
+ desired_avg_lum = 13000;
}
if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
@@ -1127,53 +1119,91 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam = &gspca_dev->cam;
- int mode, l;
- const __u8 *sn9c10x;
- __u8 reg12_19[8];
+ int i, mode;
+ __u8 regs[0x31];
mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
- sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge];
- l = sensor_data[sd->sensor].bridge_init_size[sd->bridge];
- memcpy(reg12_19, &sn9c10x[0x12 - 1], 8);
- reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4);
- /* Special cases where reg 17 and or 19 value depends on mode */
+ /* Copy registers 0x01 - 0x19 from the template */
+ memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
+ /* Set the mode */
+ regs[0x18] |= mode << 4;
+
+ /* Set bridge gain to 1.0 */
+ if (sd->bridge == BRIDGE_103) {
+ regs[0x05] = 0x20; /* Red */
+ regs[0x06] = 0x20; /* Green */
+ regs[0x07] = 0x20; /* Blue */
+ } else {
+ regs[0x10] = 0x00; /* Red and blue */
+ regs[0x11] = 0x00; /* Green */
+ }
+
+ /* Setup pixel numbers and auto exposure window */
+ if (sensor_data[sd->sensor].flags & F_SIF) {
+ regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
+ regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
+ regs[0x1c] = 0x02; /* AE H-start 64 */
+ regs[0x1d] = 0x02; /* AE V-start 64 */
+ regs[0x1e] = 0x09; /* AE H-end 288 */
+ regs[0x1f] = 0x07; /* AE V-end 224 */
+ } else {
+ regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
+ regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
+ regs[0x1c] = 0x05; /* AE H-start 160 */
+ regs[0x1d] = 0x03; /* AE V-start 96 */
+ regs[0x1e] = 0x0f; /* AE H-end 480 */
+ regs[0x1f] = 0x0c; /* AE V-end 384 */
+ }
+
+ /* Setup the gamma table (only used with the sn9c103 bridge) */
+ for (i = 0; i < 16; i++)
+ regs[0x20 + i] = i * 16;
+ regs[0x20 + i] = 255;
+
+ /* Special cases where some regs depend on mode or bridge */
switch (sd->sensor) {
case SENSOR_TAS5130CXX:
- /* probably not mode specific at all most likely the upper
+ /* FIXME / TESTME
+ probably not mode specific at all most likely the upper
nibble of 0x19 is exposure (clock divider) just as with
the tas5110, we need someone to test this. */
- reg12_19[7] = mode ? 0x23 : 0x43;
+ regs[0x19] = mode ? 0x23 : 0x43;
break;
+ case SENSOR_OV7630:
+ /* FIXME / TESTME for some reason with the 101/102 bridge the
+ clock is set to 12 MHz (reg1 == 0x04), rather than 24.
+ Also the hstart needs to go from 1 to 2 when using a 103,
+ which is likely related. This does not seem right. */
+ if (sd->bridge == BRIDGE_103) {
+ regs[0x01] = 0x44; /* Select 24 Mhz clock */
+ regs[0x12] = 0x02; /* Set hstart to 2 */
+ }
}
/* Disable compression when the raw bayer format has been selected */
if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
- reg12_19[6] &= ~0x80;
+ regs[0x18] &= ~0x80;
/* Vga mode emulation on SIF sensor? */
if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
- reg12_19[0] += 16; /* 0x12: hstart adjust */
- reg12_19[1] += 24; /* 0x13: vstart adjust */
- reg12_19[3] = 320 / 16; /* 0x15: hsize */
- reg12_19[4] = 240 / 16; /* 0x16: vsize */
+ regs[0x12] += 16; /* hstart adjust */
+ regs[0x13] += 24; /* vstart adjust */
+ regs[0x15] = 320 / 16; /* hsize */
+ regs[0x16] = 240 / 16; /* vsize */
}
/* reg 0x01 bit 2 video transfert on */
- reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1);
+ reg_w(gspca_dev, 0x01, &regs[0x01], 1);
/* reg 0x17 SensorClk enable inv Clk 0x60 */
- reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1);
+ reg_w(gspca_dev, 0x17, &regs[0x17], 1);
/* Set the registers from the template */
- reg_w(gspca_dev, 0x01, sn9c10x, l);
+ reg_w(gspca_dev, 0x01, &regs[0x01],
+ (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);
/* Init the sensor */
i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
sensor_data[sd->sensor].sensor_init_size);
- if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
- i2c_w_vector(gspca_dev,
- sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
- sensor_data[sd->sensor].sensor_bridge_init_size[
- sd->bridge]);
- /* Mode specific sensor setup */
+ /* Mode / bridge specific sensor setup */
switch (sd->sensor) {
case SENSOR_PAS202: {
const __u8 i2cpclockdiv[] =
@@ -1181,27 +1211,37 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
if (mode)
i2c_w(gspca_dev, i2cpclockdiv);
+ break;
}
+ case SENSOR_OV7630:
+ /* FIXME / TESTME We should be able to handle this identically
+ for the 101/102 and the 103 case */
+ if (sd->bridge == BRIDGE_103) {
+ const __u8 i2c[] = { 0xa0, 0x21, 0x13,
+ 0x80, 0x00, 0x00, 0x00, 0x10 };
+ i2c_w(gspca_dev, i2c);
+ }
+ break;
}
/* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
- reg_w(gspca_dev, 0x15, &reg12_19[3], 2);
+ reg_w(gspca_dev, 0x15, &regs[0x15], 2);
/* compression register */
- reg_w(gspca_dev, 0x18, &reg12_19[6], 1);
+ reg_w(gspca_dev, 0x18, &regs[0x18], 1);
/* H_start */
- reg_w(gspca_dev, 0x12, &reg12_19[0], 1);
+ reg_w(gspca_dev, 0x12, &regs[0x12], 1);
/* V_START */
- reg_w(gspca_dev, 0x13, &reg12_19[1], 1);
+ reg_w(gspca_dev, 0x13, &regs[0x13], 1);
/* reset 0x17 SensorClk enable inv Clk 0x60 */
/*fixme: ov7630 [17]=68 8f (+20 if 102)*/
- reg_w(gspca_dev, 0x17, &reg12_19[5], 1);
+ reg_w(gspca_dev, 0x17, &regs[0x17], 1);
/*MCKSIZE ->3 */ /*fixme: not ov7630*/
- reg_w(gspca_dev, 0x19, &reg12_19[7], 1);
+ reg_w(gspca_dev, 0x19, &regs[0x19], 1);
/* AE_STRX AE_STRY AE_ENDX AE_ENDY */
- reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4);
+ reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
/* Enable video transfert */
- reg_w(gspca_dev, 0x01, &sn9c10x[0], 1);
+ reg_w(gspca_dev, 0x01, &regs[0x01], 1);
/* Compression */
- reg_w(gspca_dev, 0x18, &reg12_19[6], 2);
+ reg_w(gspca_dev, 0x18, &regs[0x18], 2);
msleep(20);
sd->reg11 = -1;
@@ -1525,15 +1565,15 @@ static const struct sd_desc sd_desc = {
.driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
{USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
{USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
{USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
+#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
@@ -1544,18 +1584,22 @@ static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
{USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
{USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
- /* {USB_DEVICE(0x0c45, 0x602b), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+ /* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+ /* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
+ {USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
+ {USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
+ /* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
{USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+ {USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
+ {USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
{USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
-#endif
{USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 2d0bb17a30a2..d6f39ce1b7e1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -25,12 +25,12 @@
#include "gspca.h"
#include "jpeg.h"
-#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-
MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
+static int starcam;
+
/* controls */
enum e_ctrl {
BRIGHTNESS,
@@ -43,7 +43,7 @@ enum e_ctrl {
HFLIP,
VFLIP,
SHARPNESS,
- INFRARED,
+ ILLUM,
FREQ,
NCTRLS /* number of controls */
};
@@ -100,7 +100,8 @@ enum sensors {
};
/* device flags */
-#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_PDN_INV 0x01 /* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_ILLUM 0x02 /* presence of illuminator */
/* sn9c1xx definitions */
/* register 0x01 */
@@ -124,7 +125,7 @@ static void setgamma(struct gspca_dev *gspca_dev);
static void setautogain(struct gspca_dev *gspca_dev);
static void sethvflip(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
-static void setinfrared(struct gspca_dev *gspca_dev);
+static void setillum(struct gspca_dev *gspca_dev);
static void setfreq(struct gspca_dev *gspca_dev);
static const struct ctrl sd_ctrls[NCTRLS] = {
@@ -251,18 +252,17 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
},
.set_control = setsharpness
},
-/* mt9v111 only */
-[INFRARED] = {
+[ILLUM] = {
{
- .id = V4L2_CID_INFRARED,
+ .id = V4L2_CID_ILLUMINATORS_1,
.type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Infrared",
+ .name = "Illuminator / infrared",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
- .set_control = setinfrared
+ .set_control = setillum
},
/* ov7630/ov7648/ov7660 only */
[FREQ] = {
@@ -282,32 +282,26 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
/* table of the disabled controls */
static const __u32 ctrl_dis[] = {
[SENSOR_ADCM1700] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_GC0307] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_GC0307] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_HV7131R] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_HV7131R] = (1 << HFLIP) |
(1 << FREQ),
-[SENSOR_MI0360] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MI0360] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_MI0360B] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MI0360B] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_MO4000] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MO4000] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
@@ -315,40 +309,32 @@ static const __u32 ctrl_dis[] = {
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_OM6802] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_OM6802] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_OV7630] = (1 << INFRARED) |
- (1 << HFLIP),
+[SENSOR_OV7630] = (1 << HFLIP),
-[SENSOR_OV7648] = (1 << INFRARED) |
- (1 << HFLIP),
+[SENSOR_OV7648] = (1 << HFLIP),
[SENSOR_OV7660] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP),
[SENSOR_PO1030] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
[SENSOR_PO2030N] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << FREQ),
[SENSOR_SOI768] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
[SENSOR_SP80708] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
@@ -1822,44 +1808,46 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
switch (sd->bridge) {
case BRIDGE_SN9C102P:
+ case BRIDGE_SN9C105:
if (regF1 != 0x11)
return -ENODEV;
+ break;
+ default:
+/* case BRIDGE_SN9C110: */
+/* case BRIDGE_SN9C120: */
+ if (regF1 != 0x12)
+ return -ENODEV;
+ }
+
+ switch (sd->sensor) {
+ case SENSOR_MI0360:
+ mi0360_probe(gspca_dev);
+ break;
+ case SENSOR_OV7630:
+ ov7630_probe(gspca_dev);
+ break;
+ case SENSOR_OV7648:
+ ov7648_probe(gspca_dev);
+ break;
+ case SENSOR_PO2030N:
+ po2030n_probe(gspca_dev);
+ break;
+ }
+
+ switch (sd->bridge) {
+ case BRIDGE_SN9C102P:
reg_w1(gspca_dev, 0x02, regGpio[1]);
break;
case BRIDGE_SN9C105:
- if (regF1 != 0x11)
- return -ENODEV;
- if (sd->sensor == SENSOR_MI0360)
- mi0360_probe(gspca_dev);
reg_w(gspca_dev, 0x01, regGpio, 2);
break;
+ case BRIDGE_SN9C110:
+ reg_w1(gspca_dev, 0x02, 0x62);
+ break;
case BRIDGE_SN9C120:
- if (regF1 != 0x12)
- return -ENODEV;
- switch (sd->sensor) {
- case SENSOR_MI0360:
- mi0360_probe(gspca_dev);
- break;
- case SENSOR_OV7630:
- ov7630_probe(gspca_dev);
- break;
- case SENSOR_OV7648:
- ov7648_probe(gspca_dev);
- break;
- case SENSOR_PO2030N:
- po2030n_probe(gspca_dev);
- break;
- }
regGpio[1] = 0x70; /* no audio */
reg_w(gspca_dev, 0x01, regGpio, 2);
break;
- default:
-/* case BRIDGE_SN9C110: */
-/* case BRIDGE_SN9C325: */
- if (regF1 != 0x12)
- return -ENODEV;
- reg_w1(gspca_dev, 0x02, 0x62);
- break;
}
if (sd->sensor == SENSOR_OM6802)
@@ -1874,6 +1862,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->i2c_addr = sn9c1xx[9];
gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
+ if (!(sd->flags & F_ILLUM))
+ gspca_dev->ctrl_dis |= (1 << ILLUM);
return gspca_dev->usb_err;
}
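
The new F_ILLUM flag works through the existing ctrl_dis mechanism: ctrl_dis is a bitmask indexed by the e_ctrl enum, sd_init() sets the ILLUM bit for devices whose USB table entry did not carry F_ILLUM, and setillum() (further down) returns early when the bit is set. A compressed sketch of that gating, with illustrative names only:

enum { CTRL_ILLUM = 10 };               /* stand-in for the ILLUM enum value */

static unsigned int ctrl_dis_sketch;

static void init_sketch(unsigned int dev_flags, unsigned int f_illum)
{
        if (!(dev_flags & f_illum))
                ctrl_dis_sketch |= 1 << CTRL_ILLUM;     /* hide the control */
}

static void set_illum_sketch(int on)
{
        if (ctrl_dis_sketch & (1 << CTRL_ILLUM))
                return;                 /* control disabled for this device */
        /* ... program the illuminator GPIO ... */
        (void)on;
}
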
@@ -2197,16 +2187,28 @@ static void setsharpness(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
}
-static void setinfrared(struct gspca_dev *gspca_dev)
+static void setillum(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << INFRARED))
+ if (gspca_dev->ctrl_dis & (1 << ILLUM))
return;
-/*fixme: different sequence for StarCam Clip and StarCam 370i */
-/* Clip */
- i2c_w1(gspca_dev, 0x02, /* gpio */
- sd->ctrls[INFRARED].val ? 0x66 : 0x64);
+ switch (sd->sensor) {
+ case SENSOR_ADCM1700:
+ reg_w1(gspca_dev, 0x02, /* gpio */
+ sd->ctrls[ILLUM].val ? 0x64 : 0x60);
+ break;
+ case SENSOR_MT9V111:
+ if (starcam)
+ reg_w1(gspca_dev, 0x02,
+ sd->ctrls[ILLUM].val ?
+ 0x55 : 0x54); /* 370i */
+ else
+ reg_w1(gspca_dev, 0x02,
+ sd->ctrls[ILLUM].val ?
+ 0x66 : 0x64); /* Clip */
+ break;
+ }
}
static void setfreq(struct gspca_dev *gspca_dev)
@@ -2344,7 +2346,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* sensor clock already enabled in sd_init */
/* reg_w1(gspca_dev, 0xf1, 0x00); */
reg01 = sn9c1xx[1];
- if (sd->flags & PDN_INV)
+ if (sd->flags & F_PDN_INV)
reg01 ^= S_PDN_INV; /* power down inverted */
reg_w1(gspca_dev, 0x01, reg01);
@@ -2907,13 +2909,11 @@ static const struct sd_desc sd_desc = {
.driver_info = (BRIDGE_ ## bridge << 16) \
| (SENSOR_ ## sensor << 8) \
| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
-#endif
- {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
- {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
+ {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
+ {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
@@ -2925,7 +2925,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */
{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
- {USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+ {USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)},
/* or MT9V111 */
/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
@@ -2936,10 +2936,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */
/* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */
{USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)},
{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
-#endif
{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/
{USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/
/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
@@ -2962,16 +2960,15 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)},
-#endif
{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
{USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
/* or GC0305 / GC0307 */
{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
- {USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/
+ {USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)},
+/* {USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */ /*sn9c120b*/
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
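
The BS()/BSF() macros used in this table pack three fields into the 32-bit driver_info of each usb_device_id: bridge in bits 16-23, sensor in bits 8-15, and flags in bits 0-7 (where F_PDN_INV and F_ILLUM live). A sketch of the packing plus a matching unpack; the unpacking side is an assumption here, since it happens in the driver's config path rather than in this hunk:

#include <stdint.h>

#define PACK_INFO(bridge, sensor, flags) \
        (((uint32_t)(bridge) << 16) | ((uint32_t)(sensor) << 8) | (uint32_t)(flags))

static void unpack_info(uint32_t driver_info,
                        uint8_t *bridge, uint8_t *sensor, uint8_t *flags)
{
        *bridge = driver_info >> 16;    /* assignment to u8 keeps the low byte */
        *sensor = driver_info >> 8;
        *flags  = driver_info;          /* e.g. F_PDN_INV | F_ILLUM */
}
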
@@ -3007,3 +3004,7 @@ static void __exit sd_mod_exit(void)
module_init(sd_mod_init);
module_exit(sd_mod_exit);
+
+module_param(starcam, int, 0644);
+MODULE_PARM_DESC(starcam,
+ "StarCam model. 0: Clip, 1: 370i");
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index e64338664410..76c006b2bc83 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -555,7 +555,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04fc, 0x1528)},
{}
};
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8e202b9039f1..45552c3ff8d9 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1051,7 +1051,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 642839a11e8d..f7ef282cc600 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2155,7 +2155,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
{USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
{USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index bc9dd9034ab4..e5bf865147d7 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -786,7 +786,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 7307638ac91d..348319371523 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1509,7 +1509,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 3a162c6d5466..e836e778dfb6 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1061,7 +1061,7 @@ static const struct sd_desc *sd_desc[2] = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
{USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
{USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 404067745775..2e9c06175192 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -396,7 +396,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x2770, 0x9120)},
{}
};
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 8ba199543856..457563b7a71b 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -298,7 +298,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x2770, 0x905c)},
{USB_DEVICE(0x2770, 0x9050)},
{USB_DEVICE(0x2770, 0x9051)},
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index a4a98811b9e3..8215d5dcd456 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1163,7 +1163,7 @@ static const struct sd_desc sd_desc = {
#define ST(sensor, type) \
.driver_info = (SENSOR_ ## sensor << 8) \
| (type)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)},
{USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)},
{USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)},
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 11a192b95ed4..87be52b5e1e3 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -495,7 +495,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05e1, 0x0893)},
{}
};
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index b199ad4666bd..e2ef41cf72d7 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -327,7 +327,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0553, 0x0202)},
{USB_DEVICE(0x041e, 0x4007)},
{}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 28ea4175b80e..7e0661429293 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -564,7 +564,7 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
/* QuickCam Express */
{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
/* LEGO cam / QuickCam Web */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index a9cbcd6011d9..543542af2720 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1162,7 +1162,7 @@ static const struct sd_desc sd_desc = {
#define BS(bridge, subtype) \
.driver_info = (BRIDGE_ ## bridge << 8) \
| (subtype)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)},
{USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)},
{USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)},
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 8f0c33116e0d..a3eccd815766 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1416,7 +1416,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x17a1, 0x0128)},
{}
};
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 38c22f0a4263..933ef2ca658c 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -388,7 +388,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x046d, 0x0920)},
{USB_DEVICE(0x046d, 0x0921)},
{USB_DEVICE(0x0545, 0x808b)},
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 9b2ae1b6cc75..6caed734a06a 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4192,7 +4192,7 @@ static const struct sd_desc sd_desc = {
#define BF(bridge, flags) \
.driver_info = (BRIDGE_ ## bridge << 8) \
| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)},
{USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)},
{USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)},
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 5b5039a02031..c089a0f6f1d0 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3270,7 +3270,7 @@ static const struct sd_desc sd_desc_isoc_nego = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{ USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
{ USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
{ USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 14b85d483163..865216e9362c 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6909,7 +6909,7 @@ static const struct sd_desc sd_desc = {
#endif
};
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x041e)},
{USB_DEVICE(0x041e, 0x4017)},
{USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index e0230fcb2e36..3baa9f613ca3 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -1,6 +1,4 @@
-hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o
-
-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
+hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
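
hdpvr-i2c.o is now built unconditionally; instead of the Makefile deciding, the source guards its whole body (see the #if added at the top of hdpvr-i2c.c below) using the usual kconfig idiom: CONFIG_I2C is defined when I2C support is built in and CONFIG_I2C_MODULE when it is modular, so code that must work in either case tests both. Skeleton of the idiom; later kernels typically spell this test IS_ENABLED(CONFIG_I2C):

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* the whole implementation lives here; with both symbols undefined the
 * resulting object is empty, so the Makefile no longer needs a condition */
#endif
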
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index f7d1ee55185a..a6572e5ae369 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -378,19 +378,17 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
-#ifdef CONFIG_I2C
- /* until i2c is working properly */
- retval = 0; /* hdpvr_register_i2c_adapter(dev); */
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
goto error;
}
- /* until i2c is working properly */
- retval = 0; /* hdpvr_register_i2c_ir(dev); */
+ retval = hdpvr_register_i2c_ir(dev);
if (retval < 0)
v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
-#endif /* CONFIG_I2C */
+#endif
/* let the user know what node this device is now attached to */
v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 24966aa02a70..89b71faeaac2 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -13,6 +13,8 @@
*
*/
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -28,106 +30,78 @@
#define Z8F0811_IR_TX_I2C_ADDR 0x70
#define Z8F0811_IR_RX_I2C_ADDR 0x71
-static const u8 ir_i2c_addrs[] = {
- Z8F0811_IR_TX_I2C_ADDR,
- Z8F0811_IR_RX_I2C_ADDR,
-};
-static const char * const ir_devicenames[] = {
- "ir_tx_z8f0811_hdpvr",
- "ir_rx_z8f0811_hdpvr",
+static struct i2c_board_info hdpvr_i2c_board_info = {
+ I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+ I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
};
-static int hdpvr_new_i2c_ir(struct hdpvr_device *dev, struct i2c_adapter *adap,
- const char *type, u8 addr)
+int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
{
- struct i2c_board_info info;
+ struct i2c_client *c;
struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
- unsigned short addr_list[2] = { addr, I2C_CLIENT_END };
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, type, I2C_NAME_SIZE);
/* Our default information for ir-kbd-i2c.c to use */
- switch (addr) {
- case Z8F0811_IR_RX_I2C_ADDR:
- init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
- init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
- init_data->name = "HD PVR";
- info.platform_data = init_data;
- break;
- }
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = "HD PVR";
+ hdpvr_i2c_board_info.platform_data = init_data;
- return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
- -1 : 0;
-}
+ c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(ir_i2c_addrs); i++)
- ret += hdpvr_new_i2c_ir(dev, dev->i2c_adapter,
- ir_devicenames[i], ir_i2c_addrs[i]);
-
- return ret;
+ return (c == NULL) ? -ENODEV : 0;
}
-static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr,
- char *data, int len)
+static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
+ unsigned char addr, char *data, int len)
{
int ret;
- char *buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+
+ if (len > sizeof(dev->i2c_buf))
+ return -EINVAL;
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_READ, CTRL_READ_REQUEST,
- 0x100|addr, 0, buf, len, 1000);
+ (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
if (ret == len) {
- memcpy(data, buf, len);
+ memcpy(data, &dev->i2c_buf, len);
ret = 0;
} else if (ret >= 0)
ret = -EIO;
- kfree(buf);
-
return ret;
}
-static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr,
- char *data, int len)
+static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
+ unsigned char addr, char *data, int len)
{
int ret;
- char *buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- memcpy(buf, data, len);
+ if (len > sizeof(dev->i2c_buf))
+ return -EINVAL;
+
+ memcpy(&dev->i2c_buf, data, len);
ret = usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
- 0x100|addr, 0, buf, len, 1000);
+ (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
if (ret < 0)
- goto error;
+ return ret;
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
- 0, 0, buf, 2, 1000);
+ 0, 0, &dev->i2c_buf, 2, 1000);
- if (ret == 2)
+ if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
ret = 0;
else if (ret >= 0)
ret = -EIO;
-error:
- kfree(buf);
return ret;
}
@@ -146,10 +120,10 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
addr = msgs[i].addr << 1;
if (msgs[i].flags & I2C_M_RD)
- retval = hdpvr_i2c_read(dev, addr, msgs[i].buf,
+ retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf,
msgs[i].len);
else
- retval = hdpvr_i2c_write(dev, addr, msgs[i].buf,
+ retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf,
msgs[i].len);
}
@@ -168,30 +142,47 @@ static struct i2c_algorithm hdpvr_algo = {
.functionality = hdpvr_functionality,
};
+static struct i2c_adapter hdpvr_i2c_adapter_template = {
+ .name = "Hauppauge HD PVR I2C",
+ .owner = THIS_MODULE,
+ .algo = &hdpvr_algo,
+};
+
+static int hdpvr_activate_ir(struct hdpvr_device *dev)
+{
+ char buffer[8];
+
+ mutex_lock(&dev->i2c_mutex);
+
+ hdpvr_i2c_read(dev, 0, 0x54, buffer, 1);
+
+ buffer[0] = 0;
+ buffer[1] = 0x8;
+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+ buffer[1] = 0x18;
+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+ mutex_unlock(&dev->i2c_mutex);
+
+ return 0;
+}
+
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
{
- struct i2c_adapter *i2c_adap;
int retval = -ENOMEM;
- i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
- if (i2c_adap == NULL)
- goto error;
-
- strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
- sizeof(i2c_adap->name));
- i2c_adap->algo = &hdpvr_algo;
- i2c_adap->owner = THIS_MODULE;
- i2c_adap->dev.parent = &dev->udev->dev;
+ hdpvr_activate_ir(dev);
- i2c_set_adapdata(i2c_adap, dev);
+ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
+ sizeof(struct i2c_adapter));
+ dev->i2c_adapter.dev.parent = &dev->udev->dev;
- retval = i2c_add_adapter(i2c_adap);
+ i2c_set_adapdata(&dev->i2c_adapter, dev);
- if (!retval)
- dev->i2c_adapter = i2c_adap;
- else
- kfree(i2c_adap);
+ retval = i2c_add_adapter(&dev->i2c_adapter);
-error:
return retval;
}
+
+#endif
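
The hdpvr-i2c.c hunk above switches from a kzalloc()ed i2c_adapter pointer to an adapter embedded in struct hdpvr_device. A minimal sketch of that embedded-adapter pattern, assuming a hypothetical my_dev structure and a my_algo algorithm object defined elsewhere:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/i2c.h>

struct my_dev {
	struct usb_device *udev;
	struct i2c_adapter adap;	/* embedded, freed together with my_dev */
};

extern const struct i2c_algorithm my_algo;	/* .master_xfer etc. elsewhere */

static int my_register_i2c(struct my_dev *dev)
{
	strlcpy(dev->adap.name, "example adapter", sizeof(dev->adap.name));
	dev->adap.owner = THIS_MODULE;
	dev->adap.algo = &my_algo;
	dev->adap.dev.parent = &dev->udev->dev;
	i2c_set_adapdata(&dev->adap, dev);
	/* undo with i2c_del_adapter(&dev->adap) on disconnect */
	return i2c_add_adapter(&dev->adap);
}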
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d38fe1043e47..514aea76eaa5 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,12 +1220,9 @@ static void hdpvr_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
/* deregister I2C adapter */
-#ifdef CONFIG_I2C
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
mutex_lock(&dev->i2c_mutex);
- if (dev->i2c_adapter)
- i2c_del_adapter(dev->i2c_adapter);
- kfree(dev->i2c_adapter);
- dev->i2c_adapter = NULL;
+ i2c_del_adapter(&dev->i2c_adapter);
mutex_unlock(&dev->i2c_mutex);
#endif /* CONFIG_I2C */
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 37f1e4c7675d..ee74e3be9a6a 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -25,6 +25,7 @@
KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
#define HDPVR_MAX 8
+#define HDPVR_I2C_MAX_SIZE 128
/* Define these values to match your devices */
#define HD_PVR_VENDOR_ID 0x2040
@@ -106,9 +107,11 @@ struct hdpvr_device {
struct work_struct worker;
/* I2C adapter */
- struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter i2c_adapter;
/* I2C lock */
struct mutex i2c_mutex;
+ /* I2C message buffer space */
+ char i2c_buf[HDPVR_I2C_MAX_SIZE];
/* For passing data to ir-kbd-i2c */
struct IR_i2c_init_data ir_i2c_init_data;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index c87b6bc45555..d2b20ad383a3 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -244,15 +244,17 @@ static void ir_key_poll(struct IR_i2c *ir)
static u32 ir_key, ir_raw;
int rc;
- dprintk(2,"ir_poll_key\n");
+ dprintk(3, "%s\n", __func__);
rc = ir->get_key(ir, &ir_key, &ir_raw);
if (rc < 0) {
dprintk(2,"error\n");
return;
}
- if (rc)
+ if (rc) {
+ dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
rc_keydown(ir->rc, ir_key, 0);
+ }
}
static void ir_work(struct work_struct *work)
@@ -321,6 +323,12 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc_type = RC_TYPE_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
+ case 0x71:
+ name = "Hauppauge/Zilog Z8";
+ ir->get_key = get_key_haup_xvr;
+ rc_type = RC_TYPE_RC5;
+ ir_codes = hauppauge ? RC_MAP_HAUPPAUGE_NEW : RC_MAP_RC5_TV;
+ break;
}
/* Let the caller override settings */
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index e103b8fc7452..9fb86a081c0f 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -300,10 +300,15 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
} else if (hw == IVTV_HW_CX25840) {
struct cx25840_platform_data pdata;
+ struct i2c_board_info cx25840_info = {
+ .type = "cx25840",
+ .addr = hw_addrs[idx],
+ .platform_data = &pdata,
+ };
pdata.pvr150_workaround = itv->pvr150_workaround;
- sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
- adap, type, 0, &pdata, hw_addrs[idx], NULL);
+ sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
+ &cx25840_info, NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, type, hw_addrs[idx], NULL);
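
For reference, the v4l2_i2c_new_subdev_board() call used above takes a caller-filled i2c_board_info, so platform data now travels with the board info instead of a separate config argument. A minimal sketch, assuming an already-registered v4l2_device and adapter (0x44 is only an illustrative 7-bit address):

#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

static struct v4l2_subdev *attach_with_pdata(struct v4l2_device *v4l2_dev,
					     struct i2c_adapter *adap,
					     void *pdata)
{
	struct i2c_board_info info = {
		.type = "cx25840",
		.addr = 0x44,		/* illustrative address */
		.platform_data = pdata,
	};

	return v4l2_i2c_new_subdev_board(v4l2_dev, adap, &info, NULL);
}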
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 209ff97261a9..4904d25f689f 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -12,17 +12,41 @@
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include "mt9v011.h"
+#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_LICENSE("GPL");
-
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
+#define R00_MT9V011_CHIP_VERSION 0x00
+#define R01_MT9V011_ROWSTART 0x01
+#define R02_MT9V011_COLSTART 0x02
+#define R03_MT9V011_HEIGHT 0x03
+#define R04_MT9V011_WIDTH 0x04
+#define R05_MT9V011_HBLANK 0x05
+#define R06_MT9V011_VBLANK 0x06
+#define R07_MT9V011_OUT_CTRL 0x07
+#define R09_MT9V011_SHUTTER_WIDTH 0x09
+#define R0A_MT9V011_CLK_SPEED 0x0a
+#define R0B_MT9V011_RESTART 0x0b
+#define R0C_MT9V011_SHUTTER_DELAY 0x0c
+#define R0D_MT9V011_RESET 0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
+#define R20_MT9V011_READ_MODE 0x20
+#define R2B_MT9V011_GREEN_1_GAIN 0x2b
+#define R2C_MT9V011_BLUE_GAIN 0x2c
+#define R2D_MT9V011_RED_GAIN 0x2d
+#define R2E_MT9V011_GREEN_2_GAIN 0x2e
+#define R35_MT9V011_GLOBAL_GAIN 0x35
+#define RF1_MT9V011_CHIP_ENABLE 0xf1
+
+#define MT9V011_VERSION 0x8232
+#define MT9V011_REV_B_VERSION 0x8243
+
/* supported controls */
static struct v4l2_queryctrl mt9v011_qctrl[] = {
{
@@ -469,23 +493,6 @@ static int mt9v011_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
return 0;
}
-static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
- struct mt9v011 *core = to_mt9v011(sd);
- unsigned *xtal = data;
-
- v4l2_dbg(1, debug, sd, "s_config called\n");
-
- if (xtal) {
- core->xtal = *xtal;
- v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
- *xtal / 1000000, (*xtal / 1000) % 1000);
- }
-
- return 0;
-}
-
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mt9v011_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
@@ -536,7 +543,6 @@ static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.g_ctrl = mt9v011_g_ctrl,
.s_ctrl = mt9v011_s_ctrl,
.reset = mt9v011_reset,
- .s_config = mt9v011_s_config,
.g_chip_ident = mt9v011_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v011_g_register,
@@ -596,6 +602,14 @@ static int mt9v011_probe(struct i2c_client *c,
core->height = 480;
core->xtal = 27000000; /* Hz */
+ if (c->dev.platform_data) {
+ struct mt9v011_platform_data *pdata = c->dev.platform_data;
+
+ core->xtal = pdata->xtal;
+ v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
+ core->xtal / 1000000, (core->xtal / 1000) % 1000);
+ }
+
v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
c->addr << 1, c->adapter->name, version);
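
With .s_config gone, the crystal frequency has to reach mt9v011_probe() through platform data attached to the board info. A minimal sketch of the bridge/board side, assuming the new <media/mt9v011.h> header carries an xtal field as used in the hunk above and using an illustrative slave address:

#include <linux/i2c.h>
#include <media/mt9v011.h>

static struct mt9v011_platform_data cam_pdata = {
	.xtal = 27000000,	/* Hz */
};

static struct i2c_board_info cam_info = {
	I2C_BOARD_INFO("mt9v011", 0x5d),	/* illustrative address */
	.platform_data = &cam_pdata,
};

/* then: i2c_new_device(adap, &cam_info) or v4l2_i2c_new_subdev_board() */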
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
deleted file mode 100644
index 3350fd6083c3..000000000000
--- a/drivers/media/video/mt9v011.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
- *
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
- * This code is placed under the terms of the GNU General Public License v2
- */
-
-#ifndef MT9V011_H_
-#define MT9V011_H_
-
-#define R00_MT9V011_CHIP_VERSION 0x00
-#define R01_MT9V011_ROWSTART 0x01
-#define R02_MT9V011_COLSTART 0x02
-#define R03_MT9V011_HEIGHT 0x03
-#define R04_MT9V011_WIDTH 0x04
-#define R05_MT9V011_HBLANK 0x05
-#define R06_MT9V011_VBLANK 0x06
-#define R07_MT9V011_OUT_CTRL 0x07
-#define R09_MT9V011_SHUTTER_WIDTH 0x09
-#define R0A_MT9V011_CLK_SPEED 0x0a
-#define R0B_MT9V011_RESTART 0x0b
-#define R0C_MT9V011_SHUTTER_DELAY 0x0c
-#define R0D_MT9V011_RESET 0x0d
-#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
-#define R20_MT9V011_READ_MODE 0x20
-#define R2B_MT9V011_GREEN_1_GAIN 0x2b
-#define R2C_MT9V011_BLUE_GAIN 0x2c
-#define R2D_MT9V011_RED_GAIN 0x2d
-#define R2E_MT9V011_GREEN_2_GAIN 0x2e
-#define R35_MT9V011_GLOBAL_GAIN 0x35
-#define RF1_MT9V011_CHIP_ENABLE 0xf1
-
-#define MT9V011_VERSION 0x8232
-#define MT9V011_REV_B_VERSION 0x8243
-
-#endif
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index c881a64b41fd..d4e7c11553c3 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1449,47 +1449,6 @@ static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
}
-static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov7670_config *config = data;
- struct ov7670_info *info = to_state(sd);
- int ret;
-
- info->clock_speed = 30; /* default: a guess */
-
- /*
- * Must apply configuration before initializing device, because it
- * selects I/O method.
- */
- if (config) {
- info->min_width = config->min_width;
- info->min_height = config->min_height;
- info->use_smbus = config->use_smbus;
-
- if (config->clock_speed)
- info->clock_speed = config->clock_speed;
- }
-
- /* Make sure it's an ov7670 */
- ret = ov7670_detect(sd);
- if (ret) {
- v4l_dbg(1, debug, client,
- "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
- client->addr << 1, client->adapter->name);
- kfree(info);
- return ret;
- }
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = info->clock_speed / 30;
-
- return 0;
-}
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -1528,7 +1487,6 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.s_ctrl = ov7670_s_ctrl,
.queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
- .s_config = ov7670_s_config,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7670_g_register,
@@ -1558,6 +1516,7 @@ static int ov7670_probe(struct i2c_client *client,
{
struct v4l2_subdev *sd;
struct ov7670_info *info;
+ int ret;
info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
if (info == NULL)
@@ -1565,6 +1524,37 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
+ info->clock_speed = 30; /* default: a guess */
+ if (client->dev.platform_data) {
+ struct ov7670_config *config = client->dev.platform_data;
+
+ /*
+ * Must apply configuration before initializing device, because it
+ * selects I/O method.
+ */
+ info->min_width = config->min_width;
+ info->min_height = config->min_height;
+ info->use_smbus = config->use_smbus;
+
+ if (config->clock_speed)
+ info->clock_speed = config->clock_speed;
+ }
+
+ /* Make sure it's an ov7670 */
+ ret = ov7670_detect(sd);
+ if (ret) {
+ v4l_dbg(1, debug, client,
+ "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+ client->addr << 1, client->adapter->name);
+ kfree(info);
+ return ret;
+ }
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ info->fmt = &ov7670_formats[0];
+ info->sat = 128; /* Review this */
+ info->clkrc = info->clock_speed / 30;
return 0;
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index ac94a8bf883e..305e6aaa844a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -40,6 +40,7 @@
#include "pvrusb2-io.h"
#include <media/v4l2-device.h>
#include <media/cx2341x.h>
+#include <media/ir-kbd-i2c.h>
#include "pvrusb2-devattr.h"
/* Legal values for PVR2_CID_HSM */
@@ -202,6 +203,7 @@ struct pvr2_hdw {
/* IR related */
unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
+ struct IR_i2c_init_data ir_init_data; /* params passed to IR modules */
/* Frequency table */
unsigned int freqTable[FREQTABLE_SIZE];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 7cbe18c4ca95..ccc884948f34 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
*/
#include <linux/i2c.h>
+#include <media/ir-kbd-i2c.h>
#include "pvrusb2-i2c-core.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
@@ -48,13 +49,6 @@ module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
MODULE_PARM_DESC(disable_autoload_ir_video,
"1=do not try to autoload ir_video IR receiver");
-/* Mapping of IR schemes to known I2C addresses - if any */
-static const unsigned char ir_video_addresses[] = {
- [PVR2_IR_SCHEME_ZILOG] = 0x71,
- [PVR2_IR_SCHEME_29XXX] = 0x18,
- [PVR2_IR_SCHEME_24XXX] = 0x18,
-};
-
static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
u8 i2c_addr, /* I2C address we're talking to */
u8 *data, /* Data to write */
@@ -574,26 +568,56 @@ static void do_i2c_scan(struct pvr2_hdw *hdw)
static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
{
struct i2c_board_info info;
- unsigned char addr = 0;
+ struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
if (pvr2_disable_ir_video) {
pvr2_trace(PVR2_TRACE_INFO,
"Automatic binding of ir_video has been disabled.");
return;
}
- if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
- addr = ir_video_addresses[hdw->ir_scheme_active];
- }
- if (!addr) {
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ switch (hdw->ir_scheme_active) {
+ case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
+ case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = hdw->hdw_desc->description;
+ init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
+ /* IR Receiver */
+ info.addr = 0x18;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ break;
+ case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */
+ case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = hdw->hdw_desc->description;
+ init_data->polling_interval = 260; /* ms From lirc_zilog */
+ /* IR Receiver */
+ info.addr = 0x71;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ /* IR Transmitter */
+ info.addr = 0x70;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ break;
+ default:
/* The device either doesn't support I2C-based IR or we
don't know (yet) how to operate IR on the device. */
- return;
+ break;
}
- pvr2_trace(PVR2_TRACE_INFO,
- "Binding ir_video to i2c address 0x%02x.", addr);
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- info.addr = addr;
- i2c_new_device(&hdw->i2c_adap, &info);
}
void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
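
The pvr2_i2c_register_ir() rework above replaces address-list probing with explicit i2c_new_device() calls that carry IR_i2c_init_data. Stripped to its essentials (the adapter, init data and the 0x18 receiver address are assumed to come from the caller), the binding step is:

#include <linux/i2c.h>
#include <linux/string.h>
#include <media/ir-kbd-i2c.h>

static struct i2c_client *bind_ir_rx(struct i2c_adapter *adap,
				     struct IR_i2c_init_data *init_data)
{
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
	info.addr = 0x18;		/* receiver address from the hunk above */
	info.platform_data = init_data;
	return i2c_new_device(adap, &info);	/* NULL on failure */
}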
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e7aa588c6c5a..deb8fcf4aa49 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5179,18 +5179,8 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = {
.name = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid",
.audio_clock = 0x00187de7,
-#if 0
- /*
- * FIXME: Analog mode doesn't work, if digital is enabled. The proper
- * fix is to use tda8290 driver, but Kworld seems to use an
- * unsupported version of tda8295.
- */
- .tuner_type = TUNER_NXP_TDA18271, /* TUNER_PHILIPS_TDA8290 */
- .tuner_addr = 0x60,
-#else
- .tuner_type = UNSET,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = ADDR_UNSET,
-#endif
.radio_type = UNSET,
.radio_addr = ADDR_UNSET,
.gpiomask = 0x8e054000,
@@ -6932,10 +6922,17 @@ static inline int saa7134_kworld_sbtvd_toggle_agc(struct saa7134_dev *dev,
/* toggle AGC switch through GPIO 27 */
switch (mode) {
case TDA18271_ANALOG:
- saa7134_set_gpio(dev, 27, 0);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
+ msleep(20);
break;
case TDA18271_DIGITAL:
- saa7134_set_gpio(dev, 27, 1);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
+ msleep(20);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
+ msleep(30);
break;
default:
return -EINVAL;
@@ -6993,6 +6990,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
int saa7134_tuner_callback(void *priv, int component, int command, int arg)
{
struct saa7134_dev *dev = priv;
+
if (dev != NULL) {
switch (dev->tuner_type) {
case TUNER_PHILIPS_TDA8290:
@@ -7659,36 +7657,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
}
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
- {
- struct i2c_msg msg = { .addr = 0x4b, .flags = 0 };
- int i;
- static u8 buffer[][2] = {
- {0x30, 0x31},
- {0xff, 0x00},
- {0x41, 0x03},
- {0x41, 0x1a},
- {0xff, 0x02},
- {0x34, 0x00},
- {0x45, 0x97},
- {0x45, 0xc1},
- };
saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
- /*
- * FIXME: identify what device is at addr 0x4b and what means
- * this initialization
- */
- for (i = 0; i < ARRAY_SIZE(buffer); i++) {
- msg.buf = &buffer[i][0];
- msg.len = ARRAY_SIZE(buffer[0]);
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
- printk(KERN_WARNING
- "%s: Unable to enable tuner(%i).\n",
- dev->name, i);
- }
+ saa7134_set_gpio(dev, 27, 0);
break;
- }
} /* switch() */
/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 3315a48a848b..f65cad287b83 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -237,12 +237,39 @@ static struct tda18271_std_map mb86a20s_tda18271_std_map = {
static struct tda18271_config kworld_tda18271_config = {
.std_map = &mb86a20s_tda18271_std_map,
.gate = TDA18271_GATE_DIGITAL,
+ .config = 3, /* Use tuner callback for AGC */
+
};
static const struct mb86a20s_config kworld_mb86a20s_config = {
.demod_address = 0x10,
};
+static int kworld_sbtvd_gate_ctrl(struct dvb_frontend* fe, int enable)
+{
+ struct saa7134_dev *dev = fe->dvb->priv;
+
+ unsigned char initmsg[] = {0x45, 0x97};
+ unsigned char msg_enable[] = {0x45, 0xc1};
+ unsigned char msg_disable[] = {0x45, 0x81};
+ struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
+
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+ wprintk("could not access the I2C gate\n");
+ return -EIO;
+ }
+ if (enable)
+ msg.buf = msg_enable;
+ else
+ msg.buf = msg_disable;
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+ wprintk("could not access the I2C gate\n");
+ return -EIO;
+ }
+ msleep(20);
+ return 0;
+}
+
/* ==================================================================
* tda1004x based DVB-T cards, helper functions
*/
@@ -623,37 +650,6 @@ static struct tda827x_config tda827x_cfg_2_sw42 = {
/* ------------------------------------------------------------------ */
-static int __kworld_sbtvd_i2c_gate_ctrl(struct saa7134_dev *dev, int enable)
-{
- unsigned char initmsg[] = {0x45, 0x97};
- unsigned char msg_enable[] = {0x45, 0xc1};
- unsigned char msg_disable[] = {0x45, 0x81};
- struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
-
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
- wprintk("could not access the I2C gate\n");
- return -EIO;
- }
- if (enable)
- msg.buf = msg_enable;
- else
- msg.buf = msg_disable;
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
- wprintk("could not access the I2C gate\n");
- return -EIO;
- }
- msleep(20);
- return 0;
-}
-static int kworld_sbtvd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- struct saa7134_dev *dev = fe->dvb->priv;
-
- return __kworld_sbtvd_i2c_gate_ctrl(dev, enable);
-}
-
-/* ------------------------------------------------------------------ */
-
static struct tda1004x_config tda827x_lifeview_config = {
.demod_address = 0x08,
.invert = 1,
@@ -1660,27 +1656,23 @@ static int dvb_init(struct saa7134_dev *dev)
}
break;
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
- __kworld_sbtvd_i2c_gate_ctrl(dev, 0);
- saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
- saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
- msleep(20);
- saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
- saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
- msleep(20);
+ /* Switch to digital mode */
+ saa7134_tuner_callback(dev, 0,
+ TDA18271_CALLBACK_CMD_AGC_ENABLE, 1);
fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
&kworld_mb86a20s_config,
&dev->i2c_adap);
- __kworld_sbtvd_i2c_gate_ctrl(dev, 1);
if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
dvb_attach(tda18271_attach, fe0->dvb.frontend,
0x60, &dev->i2c_adap,
&kworld_tda18271_config);
- /*
- * Only after success, it can initialize the gate, otherwise
- * an OOPS will hit, due to kfree(fe0->dvb.frontend)
- */
- fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_i2c_gate_ctrl;
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_gate_ctrl;
}
+
+ /* the mb86a20s needs to use the I2C gate */
break;
default:
wprintk("Huh? unknown DVB card?\n");
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 41064c7b5ef8..b3d2cc729657 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -47,8 +47,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
+#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
@@ -56,78 +56,68 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
+ { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, /* not in sonixb */
#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
{ SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
+ { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, /* not in sonixb */
/* SN9C103 */
- { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, non existent ? */
+ { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, /* not in sonixb */
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
/* { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
- { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, non existent ? */
{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
-#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, non existent ? */
/* { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
/* { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
-/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */
- { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5110, non existent */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, non existent ? */
{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, non existent ? */
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
/* SN9C105 */
#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, PO1030 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, OM6801 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, HV7131GP */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, MO4000 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, ICM105C */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, OV7648 */
{ SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
/* SN9C120 */
{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
- { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
-#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
- { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, po2030 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, om6801 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, S5K53BEB */
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
-#endif
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
#endif
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 864696b7a006..c901721a1db3 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -714,15 +714,6 @@ static int sr030pc30_base_config(struct v4l2_subdev *sd)
return ret;
}
-static int sr030pc30_s_config(struct v4l2_subdev *sd,
- int irq, void *platform_data)
-{
- struct sr030pc30_info *info = to_sr030pc30(sd);
-
- info->pdata = platform_data;
- return 0;
-}
-
static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
{
return 0;
@@ -763,7 +754,6 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
}
static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
- .s_config = sr030pc30_s_config,
.s_power = sr030pc30_s_power,
.queryctrl = sr030pc30_queryctrl,
.s_ctrl = sr030pc30_s_ctrl,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
deleted file mode 100644
index 35b6ff5db319..000000000000
--- a/drivers/media/video/tda9875.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * For the TDA9875 chip
- * (The TDA9875 is used on the Diamond DTV2000 french version
- * Other cards probably use these chips as well.)
- * This driver will not complain if used with any
- * other i2c device with the same address.
- *
- * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source and
- * Eric Sandeen
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
- * This code is placed under the terms of the GNU General Public License
- * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
- * Which was based on tda8425.c by Greg Alexander (c) 1998
- *
- * OPTIONS:
- * debug - set to 1 if you'd like to see debug messages
- *
- * Revision: 0.1 - original version
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/i2c-addr.h>
-
-static int debug; /* insmod parameter */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_LICENSE("GPL");
-
-
-/* This is a superset of the TDA9875 */
-struct tda9875 {
- struct v4l2_subdev sd;
- int rvol, lvol;
- int bass, treble;
-};
-
-static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct tda9875, sd);
-}
-
-#define dprintk if (debug) printk
-
-/* The TDA9875 is made by Philips Semiconductor
- * http://www.semiconductors.philips.com
- * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator
- *
- */
-
- /* subaddresses for TDA9875 */
-#define TDA9875_MUT 0x12 /*General mute (value --> 0b11001100*/
-#define TDA9875_CFG 0x01 /* Config register (value --> 0b00000000 */
-#define TDA9875_DACOS 0x13 /*DAC i/o select (ADC) 0b0000100*/
-#define TDA9875_LOSR 0x16 /*Line output select regirter 0b0100 0001*/
-
-#define TDA9875_CH1V 0x0c /*Channel 1 volume (mute)*/
-#define TDA9875_CH2V 0x0d /*Channel 2 volume (mute)*/
-#define TDA9875_SC1 0x14 /*SCART 1 in (mono)*/
-#define TDA9875_SC2 0x15 /*SCART 2 in (mono)*/
-
-#define TDA9875_ADCIS 0x17 /*ADC input select (mono) 0b0110 000*/
-#define TDA9875_AER 0x19 /*Audio effect (AVL+Pseudo) 0b0000 0110*/
-#define TDA9875_MCS 0x18 /*Main channel select (DAC) 0b0000100*/
-#define TDA9875_MVL 0x1a /* Main volume gauche */
-#define TDA9875_MVR 0x1b /* Main volume droite */
-#define TDA9875_MBA 0x1d /* Main Basse */
-#define TDA9875_MTR 0x1e /* Main treble */
-#define TDA9875_ACS 0x1f /* Auxilary channel select (FM) 0b0000000*/
-#define TDA9875_AVL 0x20 /* Auxilary volume gauche */
-#define TDA9875_AVR 0x21 /* Auxilary volume droite */
-#define TDA9875_ABA 0x22 /* Auxilary Basse */
-#define TDA9875_ATR 0x23 /* Auxilary treble */
-
-#define TDA9875_MSR 0x02 /* Monitor select register */
-#define TDA9875_C1MSB 0x03 /* Carrier 1 (FM) frequency register MSB */
-#define TDA9875_C1MIB 0x04 /* Carrier 1 (FM) frequency register (16-8]b */
-#define TDA9875_C1LSB 0x05 /* Carrier 1 (FM) frequency register LSB */
-#define TDA9875_C2MSB 0x06 /* Carrier 2 (nicam) frequency register MSB */
-#define TDA9875_C2MIB 0x07 /* Carrier 2 (nicam) frequency register (16-8]b */
-#define TDA9875_C2LSB 0x08 /* Carrier 2 (nicam) frequency register LSB */
-#define TDA9875_DCR 0x09 /* Demodulateur configuration regirter*/
-#define TDA9875_DEEM 0x0a /* FM de-emphasis regirter*/
-#define TDA9875_FMAT 0x0b /* FM Matrix regirter*/
-
-/* values */
-#define TDA9875_MUTE_ON 0xff /* general mute */
-#define TDA9875_MUTE_OFF 0xcc /* general no mute */
-
-
-
-/* Begin code */
-
-static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned char buffer[2];
-
- v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
- buffer[0] = subaddr;
- buffer[1] = val;
- if (2 != i2c_master_send(client, buffer, 2)) {
- v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
- subaddr, val);
- return -1;
- }
- return 0;
-}
-
-
-static int i2c_read_register(struct i2c_client *client, int addr, int reg)
-{
- unsigned char write[1];
- unsigned char read[1];
- struct i2c_msg msgs[2] = {
- { addr, 0, 1, write },
- { addr, I2C_M_RD, 1, read }
- };
-
- write[0] = reg;
-
- if (2 != i2c_transfer(client->adapter, msgs, 2)) {
- v4l_warn(client, "I/O error (read2)\n");
- return -1;
- }
- v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
- return read[0];
-}
-
-static void tda9875_set(struct v4l2_subdev *sd)
-{
- struct tda9875 *tda = to_state(sd);
- unsigned char a;
-
- v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
- tda->lvol, tda->rvol, tda->bass, tda->treble);
-
- a = tda->lvol & 0xff;
- tda9875_write(sd, TDA9875_MVL, a);
- a =tda->rvol & 0xff;
- tda9875_write(sd, TDA9875_MVR, a);
- a =tda->bass & 0xff;
- tda9875_write(sd, TDA9875_MBA, a);
- a =tda->treble & 0xff;
- tda9875_write(sd, TDA9875_MTR, a);
-}
-
-static void do_tda9875_init(struct v4l2_subdev *sd)
-{
- struct tda9875 *t = to_state(sd);
-
- v4l2_dbg(1, debug, sd, "In tda9875_init\n");
- tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
- tda9875_write(sd, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/
- tda9875_write(sd, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/
- tda9875_write(sd, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/
- tda9875_write(sd, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/
- tda9875_write(sd, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/
- tda9875_write(sd, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/
- tda9875_write(sd, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/
- tda9875_write(sd, TDA9875_DCR, 0x00); /*Demod config 0x00*/
- tda9875_write(sd, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/
- tda9875_write(sd, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/
- tda9875_write(sd, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/
- tda9875_write(sd, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/
-
- tda9875_write(sd, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/
- tda9875_write(sd, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */
- tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
- tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
- tda9875_write(sd, TDA9875_LOSR, 0x00); /* line out (in:mono)*/
- tda9875_write(sd, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */
- tda9875_write(sd, TDA9875_MCS, 0x44); /* Main ch select (DAC) */
- tda9875_write(sd, TDA9875_MVL, 0x03); /* Vol Main left 10dB */
- tda9875_write(sd, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/
- tda9875_write(sd, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/
- tda9875_write(sd, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/
- tda9875_write(sd, TDA9875_ACS, 0x44); /* Aux chan select (dac)*/
- tda9875_write(sd, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/
- tda9875_write(sd, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/
- tda9875_write(sd, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/
- tda9875_write(sd, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/
-
- tda9875_write(sd, TDA9875_MUT, 0xcc); /* General mute */
-
- t->lvol = t->rvol = 0; /* 0dB */
- t->bass = 0; /* 0dB */
- t->treble = 0; /* 0dB */
- tda9875_set(sd);
-}
-
-
-static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tda9875 *t = to_state(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- {
- int left = (t->lvol+84)*606;
- int right = (t->rvol+84)*606;
-
- ctrl->value=max(left,right);
- return 0;
- }
- case V4L2_CID_AUDIO_BALANCE:
- {
- int left = (t->lvol+84)*606;
- int right = (t->rvol+84)*606;
- int volume = max(left,right);
- int balance = (32768*min(left,right))/
- (volume ? volume : 1);
- ctrl->value=(left<right)?
- (65535-balance) : balance;
- return 0;
- }
- case V4L2_CID_AUDIO_BASS:
- ctrl->value = (t->bass+12)*2427; /* min -12 max +15 */
- return 0;
- case V4L2_CID_AUDIO_TREBLE:
- ctrl->value = (t->treble+12)*2730;/* min -12 max +12 */
- return 0;
- }
- return -EINVAL;
-}
-
-static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tda9875 *t = to_state(sd);
- int chvol = 0, volume = 0, balance = 0, left, right;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- left = (t->lvol+84)*606;
- right = (t->rvol+84)*606;
-
- volume = max(left,right);
- balance = (32768*min(left,right))/
- (volume ? volume : 1);
- balance =(left<right)?
- (65535-balance) : balance;
-
- volume = ctrl->value;
-
- chvol=1;
- break;
- case V4L2_CID_AUDIO_BALANCE:
- left = (t->lvol+84)*606;
- right = (t->rvol+84)*606;
-
- volume=max(left,right);
-
- balance = ctrl->value;
-
- chvol=1;
- break;
- case V4L2_CID_AUDIO_BASS:
- t->bass = ((ctrl->value/2400)-12) & 0xff;
- if (t->bass > 15)
- t->bass = 15;
- if (t->bass < -12)
- t->bass = -12 & 0xff;
- break;
- case V4L2_CID_AUDIO_TREBLE:
- t->treble = ((ctrl->value/2700)-12) & 0xff;
- if (t->treble > 12)
- t->treble = 12;
- if (t->treble < -12)
- t->treble = -12 & 0xff;
- break;
- default:
- return -EINVAL;
- }
-
- if (chvol) {
- left = (min(65536 - balance,32768) *
- volume) / 32768;
- right = (min(balance,32768) *
- volume) / 32768;
- t->lvol = ((left/606)-84) & 0xff;
- if (t->lvol > 24)
- t->lvol = 24;
- if (t->lvol < -84)
- t->lvol = -84 & 0xff;
-
- t->rvol = ((right/606)-84) & 0xff;
- if (t->rvol > 24)
- t->rvol = 24;
- if (t->rvol < -84)
- t->rvol = -84 & 0xff;
- }
-
- tda9875_set(sd);
- return 0;
-}
-
-static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- }
- return -EINVAL;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops tda9875_core_ops = {
- .queryctrl = tda9875_queryctrl,
- .g_ctrl = tda9875_g_ctrl,
- .s_ctrl = tda9875_s_ctrl,
-};
-
-static const struct v4l2_subdev_ops tda9875_ops = {
- .core = &tda9875_core_ops,
-};
-
-/* ----------------------------------------------------------------------- */
-
-
-/* *********************** *
- * i2c interface functions *
- * *********************** */
-
-static int tda9875_checkit(struct i2c_client *client, int addr)
-{
- int dic, rev;
-
- dic = i2c_read_register(client, addr, 254);
- rev = i2c_read_register(client, addr, 255);
-
- if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
- v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
- dic == 0 ? "" : "A", rev, addr << 1);
- return 1;
- }
- v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
- addr << 1, dic, rev);
- return 0;
-}
-
-static int tda9875_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct tda9875 *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- if (!tda9875_checkit(client, client->addr))
- return -ENODEV;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
-
- do_tda9875_init(sd);
- return 0;
-}
-
-static int tda9875_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
- do_tda9875_init(sd);
- v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
- return 0;
-}
-
-static const struct i2c_device_id tda9875_id[] = {
- { "tda9875", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tda9875_id);
-
-static struct i2c_driver tda9875_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "tda9875",
- },
- .probe = tda9875_probe,
- .remove = tda9875_remove,
- .id_table = tda9875_id,
-};
-
-static __init int init_tda9875(void)
-{
- return i2c_add_driver(&tda9875_driver);
-}
-
-static __exit void exit_tda9875(void)
-{
- i2c_del_driver(&tda9875_driver);
-}
-
-module_init(init_tda9875);
-module_exit(exit_tda9875);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a1ffe18640fe..df33a1d188bb 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -512,19 +512,20 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
int buf_size, gfp_t gfp_flags,
usb_complete_t complete_fn, void *context)
{
- struct urb *urb;
- void *mem;
- int i;
+ int i = 0;
- for (i = 0; i < num; i++) {
- urb = usb_alloc_urb(0, gfp_flags);
+ for (; i < num; i++) {
+ void *mem;
+ struct urb *urb = usb_alloc_urb(0, gfp_flags);
if (urb == NULL)
return i;
mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
&urb->transfer_dma);
- if (mem == NULL)
+ if (mem == NULL) {
+ usb_free_urb(urb);
return i;
+ }
usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
mem, buf_size, complete_fn, context);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 3f0871b550ad..810eef43c216 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -407,18 +407,6 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
- if (sd) {
- /* We return errors from v4l2_subdev_call only if we have the
- callback as the .s_config is not mandatory */
- int err = v4l2_subdev_call(sd, core, s_config,
- info->irq, info->platform_data);
-
- if (err && err != -ENOIOCTLCMD) {
- v4l2_device_unregister_subdev(sd);
- sd = NULL;
- }
- }
-
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -428,9 +416,8 @@ error:
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter, const char *client_type,
- int irq, void *platform_data,
u8 addr, const unsigned short *probe_addrs)
{
struct i2c_board_info info;
@@ -440,12 +427,10 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
memset(&info, 0, sizeof(info));
strlcpy(info.type, client_type, sizeof(info.type));
info.addr = addr;
- info.irq = irq;
- info.platform_data = platform_data;
return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
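
After this rename, callers that previously passed irq/platform_data through v4l2_i2c_new_subdev_cfg() either drop those arguments or switch to the board-info variant. A minimal sketch of the simple case (the "tuner" type and 0x61 address are purely illustrative):

#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

static struct v4l2_subdev *load_tuner(struct v4l2_device *v4l2_dev,
				      struct i2c_adapter *adap)
{
	return v4l2_i2c_new_subdev(v4l2_dev, adap, "tuner", 0x61, NULL);
}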
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 8f81efcfcf56..ef66d2af0c57 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -569,7 +569,7 @@ static int user_to_new(struct v4l2_ext_control *c,
int ret;
u32 size;
- ctrl->has_new = 1;
+ ctrl->is_new = 1;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER64:
ctrl->val64 = c->value64;
@@ -1280,8 +1280,12 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (ctrl->done)
continue;
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ }
+ }
/* Skip button controls and read-only controls. */
if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
@@ -1340,12 +1344,15 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
ctrl = ref->ctrl;
memset(qc, 0, sizeof(*qc));
- qc->id = ctrl->id;
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
qc->minimum = ctrl->minimum;
qc->maximum = ctrl->maximum;
qc->default_value = ctrl->default_value;
- if (qc->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU)
qc->step = 1;
else
qc->step = ctrl->step;
@@ -1645,7 +1652,7 @@ static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
if (ctrl == NULL)
continue;
- if (ctrl->has_new) {
+ if (ctrl->is_new) {
/* Double check this: it may have changed since the
last check in try_or_set_ext_ctrls(). */
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
@@ -1719,13 +1726,13 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_lock(ctrl);
- /* Reset the 'has_new' flags of the cluster */
+ /* Reset the 'is_new' flags of the cluster */
for (j = 0; j < master->ncontrols; j++)
if (master->cluster[j])
- master->cluster[j]->has_new = 0;
+ master->cluster[j]->is_new = 0;
/* Copy the new caller-supplied control values.
- user_to_new() sets 'has_new' to 1. */
+ user_to_new() sets 'is_new' to 1. */
ret = cluster_walk(i, cs, helpers, user_to_new);
if (!ret)
@@ -1820,15 +1827,18 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
int ret;
int i;
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
v4l2_ctrl_lock(ctrl);
- /* Reset the 'has_new' flags of the cluster */
+ /* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
- master->cluster[i]->has_new = 0;
+ master->cluster[i]->is_new = 0;
ctrl->val = *val;
- ctrl->has_new = 1;
+ ctrl->is_new = 1;
ret = try_or_set_control_cluster(master, false);
if (!ret)
ret = try_or_set_control_cluster(master, true);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 359e23290a7e..341764a3a990 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -419,6 +419,10 @@ static int get_index(struct video_device *vdev)
* The registration code assigns minor numbers and device node numbers
* based on the requested type and registers the new device node with
* the kernel.
+ *
+ * This function assumes that struct video_device was zeroed when it
+ * was allocated and does not contain any stale data.
+ *
* An error is returned if no free minor or device node number could be
* found, or if the registration of the device node failed.
*
@@ -440,7 +444,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
int minor_offset = 0;
int minor_cnt = VIDEO_NUM_DEVICES;
const char *name_base;
- void *priv = vdev->dev.p;
/* A minor value of -1 marks this video device as never
having been registered */
@@ -559,10 +562,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
}
/* Part 4: register the device with sysfs */
- memset(&vdev->dev, 0, sizeof(vdev->dev));
- /* The memset above cleared the device's device_private, so
- put back the copy we made earlier. */
- vdev->dev.p = priv;
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
if (vdev->parent)
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 7fe6f92af480..ce64fe16bc60 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -100,6 +100,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
is a platform bus, then it is never deleted. */
if (client)
i2c_unregister_device(client);
+ continue;
}
#endif
#if defined(CONFIG_SPI)
@@ -108,6 +109,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
if (spi)
spi_unregister_device(spi);
+ continue;
}
#endif
}
@@ -126,11 +128,19 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
WARN_ON(sd->v4l2_dev != NULL);
if (!try_module_get(sd->owner))
return -ENODEV;
+ sd->v4l2_dev = v4l2_dev;
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+ if (err)
+ return err;
+ }
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
- if (err)
+ if (err) {
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
return err;
- sd->v4l2_dev = v4l2_dev;
+ }
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
spin_unlock(&v4l2_dev->lock);
@@ -146,6 +156,8 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
spin_lock(&sd->v4l2_dev->lock);
list_del(&sd->list);
spin_unlock(&sd->v4l2_dev->lock);
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
sd->v4l2_dev = NULL;
module_put(sd->owner);
}
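
The registered()/unregistered() hooks invoked above come from the subdevice's internal ops. A minimal sketch of a subdev driver wiring them up (all names are hypothetical):

#include <media/v4l2-subdev.h>

static int my_registered(struct v4l2_subdev *sd)
{
	/* called once the subdev has been attached to a v4l2_device */
	return 0;
}

static void my_unregistered(struct v4l2_subdev *sd)
{
	/* called when the subdev is detached again */
}

static const struct v4l2_subdev_internal_ops my_internal_ops = {
	.registered = my_registered,
	.unregistered = my_unregistered,
};

/* in probe(), after v4l2_i2c_subdev_init(): sd->internal_ops = &my_internal_ops; */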
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7e47f15f350d..f51327ef6757 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1659,20 +1659,24 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dbg_register *p = arg;
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else if (ops->vidioc_g_register)
- ret = ops->vidioc_g_register(file, fh, p);
+ if (ops->vidioc_g_register) {
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_g_register(file, fh, p);
+ }
break;
}
case VIDIOC_DBG_S_REGISTER:
{
struct v4l2_dbg_register *p = arg;
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else if (ops->vidioc_s_register)
- ret = ops->vidioc_s_register(file, fh, p);
+ if (ops->vidioc_s_register) {
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_s_register(file, fh, p);
+ }
break;
}
#endif
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 019ee206cbee..fa35639d0c15 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -937,6 +937,7 @@ static void w9966_term(struct w9966 *cam)
parport_unregister_device(cam->pdev);
w9966_set_state(cam, W9966_STATE_PDEV, 0);
}
+ memset(cam, 0, sizeof(*cam));
}
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 9cdc3bb15b15..9f2bac519647 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1041,7 +1041,7 @@ zr36057_init (struct zoran *zr)
/* allocate memory *before* doing anything to the hardware
* in case allocation fails */
zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
- zr->video_dev = kmalloc(sizeof(struct video_device), GFP_KERNEL);
+ zr->video_dev = video_device_alloc();
if (!zr->stat_com || !zr->video_dev) {
dprintk(1,
KERN_ERR
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 16fe4f9b719b..03823327db25 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2864,7 +2864,7 @@ config MLX4_CORE
default n
config MLX4_DEBUG
- bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+ bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
depends on MLX4_CORE
default y
---help---
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 62d6f88cbab5..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1644,7 +1644,7 @@ ks8695_cleanup(void)
module_init(ks8695_init);
module_exit(ks8695_cleanup);
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf672009948..23f2ab0f2fa8 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
*/
static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
{
- u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+ u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
~GIGA_CR_1000T_SPEED_MASK;
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
}
if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
- atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+ atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
return -1;
return 0;
}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
"Error Setting up Auto-Negotiation\n");
return ret_val;
}
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
break;
case MEDIA_TYPE_100M_FULL:
- mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
break;
case MEDIA_TYPE_100M_HALF:
- mii_bmcr_data |= BMCR_SPEED_100;
+ mii_bmcr_data |= BMCR_SPEED100;
break;
case MEDIA_TYPE_10M_FULL:
- mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_FULLDPLX;
break;
case MEDIA_TYPE_10M_HALF:
- mii_bmcr_data |= BMCR_SPEED_10;
break;
default:
if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
err = atl1c_phy_setup_adv(hw);
if (err)
return err;
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
}
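
The atl1c hunks above swap the driver-private BMCR_*/MII_* names for the generic ones in <linux/mii.h>; note that the 10M cases now set no speed bit at all, because leaving BMCR_SPEED100 and BMCR_SPEED1000 clear already selects 10 Mb/s. A small sketch of that register build-up, assuming <linux/mii.h> and <linux/ethtool.h>; the helper and its parameters are illustrative:

static u16 build_bmcr(bool autoneg, int speed, bool full_duplex)
{
	u16 bmcr = 0;	/* all speed bits clear == 10 Mb/s */

	if (autoneg)
		bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
	else if (speed == SPEED_100)
		bmcr |= BMCR_SPEED100 | (full_duplex ? BMCR_FULLDPLX : 0);
	else if (full_duplex)
		bmcr |= BMCR_FULLDPLX;	/* 10M full: duplex bit only */

	return bmcr;		/* caller writes this to MII_BMCR */
}
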
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa1..655fc6c4a8a4 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
-/* PHY Control Register */
-#define MII_BMCR 0x00
-#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN 0x0800 /* Power down */
-#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK 0x2040
-#define BMCR_SPEED_1000 0x0040
-#define BMCR_SPEED_100 0x2000
-#define BMCR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_BMSR 0x01
-#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
/* Autoneg Advertisement Register */
-#define MII_ADVERTISE 0x04
-#define ADVERTISE_SPEED_MASK 0x01E0
-#define ADVERTISE_DEFAULT_CAP 0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+ (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
/* 1000BASE-T Control Register */
-#define MII_GIGA_CR 0x09
#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b948..1209297433b8 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
- adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+ adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
- adv4 |= MII_AR_10T_HD_CAPS;
+ adv4 |= ADVERTISE_10HALF;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
- adv4 |= MII_AR_10T_FD_CAPS;
+ adv4 |= ADVERTISE_10FULL;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
- adv4 |= MII_AR_100TX_HD_CAPS;
+ adv4 |= ADVERTISE_100HALF;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
- adv4 |= MII_AR_100TX_FD_CAPS;
+ adv4 |= ADVERTISE_100FULL;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
- adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+ adv9 |= ADVERTISE_1000FULL;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8c..923063d2e5bb 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
- mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
- MII_AR_10T_FD_CAPS |
- MII_AR_100TX_HD_CAPS |
- MII_AR_100TX_FD_CAPS);
- hw->autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL |
- ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ mii_autoneg_adv_reg |= ADVERTISE_ALL;
+ hw->autoneg_advertised = ADVERTISE_ALL;
if (hw->nic_type == athr_l1e) {
- mii_1000t_ctrl_reg |=
- MII_AT001_CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
- mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
- mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
- mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
- mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
- mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int ret_val;
u16 phy_data;
- phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+ phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ err = atl1e_write_phy_reg(hw, MII_CTRL1000,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
- MII_CR_RESET | MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
+ BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
return err;
}
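
The atl1e conversion collapses four private capability bits into ADVERTISE_ALL and moves the gigabit advertisement to MII_CTRL1000/ADVERTISE_1000FULL. A hedged sketch of the resulting register writes; the patch actually preserves the existing selector bits rather than ORing in ADVERTISE_CSMA, and return values are ignored here for brevity:

static void setup_adv_sketch(struct atl1e_hw *hw, bool giga_capable)
{
	/* 10/100 half+full plus both pause bits, as in the AUTO_SENSOR case */
	u16 adv  = ADVERTISE_CSMA | ADVERTISE_ALL |
		   ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	u16 giga = giga_capable ? ADVERTISE_1000FULL : 0;

	atl1e_write_phy_reg(hw, MII_ADVERTISE, adv);
	atl1e_write_phy_reg(hw, MII_CTRL1000, giga);	/* was MII_AT001_CR */
}
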
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cfa..74df16aef793 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/***************************** MII definition ***************************************/
/* PHY Common Register */
-#define MII_BMCR 0x00
-#define MII_BMSR 0x01
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
-#define MII_ADVERTISE 0x04
-#define MII_LPA 0x05
-#define MII_EXPANSION 0x06
-#define MII_AT001_CR 0x09
-#define MII_AT001_SR 0x0A
-#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
-#define MII_RERRCOUNTER 0x15
-#define MII_SREVISION 0x16
-#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
-#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
-#define MII_TPISTATUS 0x1b
-#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK 0x2040
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
-#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
-#define MII_LPA_PAUSE 0x0400 /* PAUSE */
-#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
-#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
-#define MII_LPA_NPAGE 0x8000 /* Next page bit */
-
/* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
-#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-#define MII_AR_SPEED_MASK 0x01E0
-#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK 0
/* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+ (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394e..bf7500ccd73f 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- mii_advertise_data = MII_AR_10T_HD_CAPS;
+ mii_advertise_data = ADVERTISE_10HALF;
- if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+ if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe4..9f356d5d0f33 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
}
unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
if (++unmap_cons >= unmap_q->q_depth)
break;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
if (++unmap_cons >= unmap_q->q_depth)
break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
prefetch(&unmap_array[unmap_cons + 1]);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
prefetch(&unmap_array[unmap_cons + 1]);
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
}
@@ -340,19 +340,22 @@ static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
int unmap_cons;
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
- skb = unmap_q->unmap_array[unmap_cons].skb;
+ skb = unmap_array[unmap_cons].skb;
if (!skb)
continue;
- unmap_q->unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
- unmap_array[unmap_cons],
- dma_addr), rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
skb->dev = bnad->netdev;
skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
- dma_addr = pci_map_single(bnad->pcidev, skb->data,
- rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- u32 flags;
+ u32 flags, unmap_cons;
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[1];
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
- skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ skb = unmap_array[unmap_cons].skb;
BUG_ON(!(skb));
- unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_q->
- unmap_array[unmap_q->
- consumer_index],
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
- rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
/* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
if (mem_info->mem_type == BNA_MEM_T_DMA) {
BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
dma_pa);
- pci_free_consistent(bnad->pcidev,
- mem_info->mdl[i].len,
- mem_info->mdl[i].kva, dma_pa);
+ dma_free_coherent(&bnad->pcidev->dev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
} else
kfree(mem_info->mdl[i].kva);
}
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
for (i = 0; i < mem_info->num; i++) {
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
- pci_alloc_consistent(bnad->pcidev,
- mem_info->len, &dma_pa);
+ dma_alloc_coherent(&bnad->pcidev->dev,
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap_q->unmap_array[unmap_prod].skb = skb;
BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(skb_headlen(skb));
- dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr =
- pci_map_page(bnad->pcidev, frag->page,
- frag->page_offset, size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+ frag->page_offset, size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err)
goto release_regions;
}
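
bnad_pci_init now negotiates its DMA mask through the generic API rather than the pci_* wrappers: try 64-bit first, fall back to 32-bit, and only then give up. A compact sketch of that fallback, assuming <linux/dma-mapping.h>; the helper name and the -EIO choice are illustrative:

static int set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	/* prefer 64-bit DMA, fall back to 32-bit before failing */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}
	*using_dac = false;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
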
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557def..a89117fa4970 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
/* Unmap queues for Tx / Rx cleanup */
struct bnad_skb_unmap {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr)
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct bnad_unmap_q {
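
Together, the bnad.c and bnad.h hunks move the unmap bookkeeping from the deprecated pci_unmap_* helpers to dma_unmap_addr()/dma_unmap_addr_set() keyed off a DEFINE_DMA_UNMAP_ADDR() field. A sketch of the pattern, with the struct mirrored from the hunk above; the two helpers are illustrative and dma_mapping_error() checking is omitted:

/* Assumes <linux/dma-mapping.h> and <linux/skbuff.h>. */
struct bnad_skb_unmap {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* replaces DECLARE_PCI_UNMAP_ADDR */
};

static void map_one(struct device *dev, struct bnad_skb_unmap *u,
		    struct sk_buff *skb, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);

	u->skb = skb;
	dma_unmap_addr_set(u, dma_addr, dma);	/* stashed for the unmap path */
}

static void unmap_one(struct device *dev, struct bnad_skb_unmap *u, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(u, dma_addr), len, DMA_TO_DEVICE);
	u->skb = NULL;
}
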
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..2a961b7f7e17 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -7553,6 +7555,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
!(data & ETH_FLAG_RXVLAN))
return -EINVAL;
+ /* TSO with VLAN tag won't work with current firmware */
+ if (!(data & ETH_FLAG_TXVLAN))
+ return -EINVAL;
+
rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
ETH_FLAG_TXVLAN);
if (rc)
@@ -7962,11 +7968,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
/* AER (Advanced Error Reporting) hooks */
err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
- "failed 0x%x\n", err);
- /* non-fatal, continue */
- }
+ if (!err)
+ bp->flags |= BNX2_FLAG_AER_ENABLED;
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8232,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
if (bp->regview) {
iounmap(bp->regview);
@@ -8312,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
@@ -8418,8 +8423,10 @@ bnx2_remove_one(struct pci_dev *pdev)
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
free_netdev(dev);
@@ -8535,7 +8542,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_PCIE))
+ if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
err = pci_cleanup_aer_uncorrect_error_status(pdev);
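
The bnx2 changes stop treating "device is PCIe" as "AER is active" and instead record whether pci_enable_pcie_error_reporting() succeeded, so the disable/cleanup calls only run when there is something to undo. A sketch of that pairing; the two helper functions are illustrative (the patch open-codes the logic), and BNX2_FLAG_AER_ENABLED comes from the bnx2.h hunk below:

/* Assumes <linux/aer.h>. */
static void bnx2_aer_enable_sketch(struct bnx2 *bp, struct pci_dev *pdev)
{
	if (!pci_enable_pcie_error_reporting(pdev))
		bp->flags |= BNX2_FLAG_AER_ENABLED;	/* remember it worked */
}

static void bnx2_aer_disable_sketch(struct bnx2 *bp, struct pci_dev *pdev)
{
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}
}
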
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..7a5e88f831f6 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
#define BNX2_CP_SCRATCH 0x001a0000
+#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
+
/*
* mcp_reg definition
@@ -6741,6 +6743,7 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
+#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
@@ -6758,7 +6761,7 @@ struct bnx2 {
u32 tx_wake_thresh;
#ifdef BCM_CNIC
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
#endif
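
Marking cnic_ops as __rcu (here and in bnx2x.h below) lets sparse check every access; code paths that hold cnic_lock then fetch the pointer with rcu_dereference_protected() plus a lockdep expression, as the bnx2.c hunks do, instead of a plain read. A one-function sketch of that accessor (the wrapper itself is illustrative):

static struct cnic_ops *bnx2_cnic_ops_sketch(struct bnx2 *bp)
{
	/* caller must hold bp->cnic_lock; lockdep verifies it */
	return rcu_dereference_protected(bp->cnic_ops,
					 lockdep_is_held(&bp->cnic_lock));
}
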
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c9436..ff87ec33d00e 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-3"
-#define DRV_MODULE_RELDATE "2010/12/21"
+#define DRV_MODULE_VERSION "1.62.11-0"
+#define DRV_MODULE_RELDATE "2011/01/31"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -976,8 +976,12 @@ struct bnx2x {
#define MF_FUNC_DIS 0x1000
#define FCOE_MACS_SET 0x2000
#define NO_FCOE_FLAG 0x4000
+#define NO_ISCSI_OOO_FLAG 0x8000
+#define NO_ISCSI_FLAG 0x10000
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@@ -1110,7 +1114,7 @@ struct bnx2x {
#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
@@ -1125,7 +1129,6 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
- u8 iscsi_mac[ETH_ALEN];
u8 fip_mac[ETH_ALEN];
#endif
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f63989..be503cc0a50b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
#include "bnx2x_fw_defs.h"
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
struct license_key {
u32 reserved[6];
-#if defined(__BIG_ENDIAN)
- u16 max_iscsi_init_conn;
- u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
- u16 max_iscsi_trgt_conn;
- u16 max_iscsi_init_conn;
-#endif
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
- u32 reserved_a[6];
-};
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+ u32 reserved_b[4];
+};
#define PORT_0 0
#define PORT_1 1
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
- u32 Reserved0[16]; /* 0x158 */
-
+ u32 Reserved0[3]; /* 0x158 */
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT 0
+#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+ u32 Reserved01[12]; /* 0x158 */
/* for external PHY, or forced mode or during AN */
u16 xgxs_config_rx[4]; /* 0x198 */
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 Reserved1[56]; /* 0x1A8 */
u32 default_cfg; /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /*
+ * When the KR link is required to be forced even though the link
+ * partner is not KR-compliant, this parameter determines the trigger
+ * for it. When GPIO is selected, a low input will force the speed.
+ * Currently the default speed is 1G. In the future, it may be widened
+ * to select the forced speed with another parameter. Note that when
+ * force-1G is enabled, it overrides option 56: Link Speed option.
+ */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Enable to determine with which GPIO to reset the external phy */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
/* Enable BAM on KR */
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+ /* Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -352,6 +443,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
/* forced only */
#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ /* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
u32 external_phy_config;
#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
@@ -377,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
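
The reworked license_key packs the target and initiator connection limits into one 32-bit word, to be pulled apart with the _MASK/_SHIFT pairs defined above. A small sketch of the intended extraction; the helper name is illustrative, the macros are from the hunk:

static u16 bnx2x_max_iscsi_init_conn_sketch(u32 max_iscsi_conn)
{
	return (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
	       BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
}
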
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f391..f2f367d4e74d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
/********************************************************/
#define ETH_HLEN 14
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Shortcut definitions */
@@ -79,7 +80,7 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
#define PHY_XGXS_FLAG 0x1
#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
/* */
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
- #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -153,15 +154,15 @@
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
- #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
-#define SFP_EEPROM_OPTIONS_ADDR 0x40
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 2
+#define SFP_EEPROM_OPTIONS_SIZE 2
-#define EDC_MODE_LINEAR 0x0022
-#define EDC_MODE_LIMITING 0x0044
-#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_write(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_read(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
- /**
+ /*
* mapping between entry priority to client number (0,1,2 -debug and
* management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
* 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries, 3 -
* COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* defines which entries (clients) are subjected to WFQ arbitration */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
- /**
- * For strict priority entries defines the number of consecutive
- * slots for the highest priority.
- */
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
- /**
+ /*
* mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
/* ETS mode disable */
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
- /**
+ /*
* If ETS mode is enabled (there is no strict priority) defines a WFQ
* weight for COS0/COS1.
*/
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
/* ETS disabled configuration */
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- /**
- * defines which entries (clients) are subjected to WFQ arbitration
- * COS0 0x8
- * COS1 0x10
- */
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
- /**
- * mapping between the ARB_CREDIT_WEIGHT registers and actual
- * client numbers (WEIGHT_0 does not actually have to represent
- * client 0)
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
- */
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
- /**
- * Bitmap of 5bits length. Each bit specifies whether the entry behaves
- * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
- * entry, 4 - COS1 entry.
- * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
- * bit4 bit3 bit2 bit1 bit0
- * MCP and debug are strict
- */
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
if ((0 == total_bw) ||
(0 == cos0_bw) ||
(0 == cos1_bw)) {
- DP(NETIF_MSG_LINK,
- "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
u32 val = 0;
DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries,
* 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
- /**
+ /*
* For strict priority entries defines the number of consecutive slots
* for the highest priority.
*/
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
- /**
- * mapping between entry priority to client number (0,1,2 -debug and
- * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
- * 3bits client num.
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
- * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
- */
+ /*
+ * mapping between entry priority to client number (0,1,2 -debug and
+ * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
val = (0 == strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
/* MAC/PBF section */
/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
/* reset and unreset the emac core */
struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
u16 timeout;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
udelay(5);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
/* init emac - use read-modify-write */
/* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
}
static u8 bnx2x_emac_enable(struct link_params *params,
- struct link_vars *vars, u8 lb)
+ struct link_vars *vars, u8 lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
- /* for paladium */
- if (CHIP_REV_IS_EMUL(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
- }
- /* for fpga */
- else
-
- if (CHIP_REV_IS_FPGA(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
- 0);
- } else
/* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
DP(NETIF_MSG_LINK, "XGXS\n");
/* select the master lanes (out of 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
- port*4, ser_lane);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
/* select XGXS */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes\n");
/* select SerDes */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 0);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
}
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_RESET);
+ EMAC_RX_MODE_RESET);
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_RESET);
+ EMAC_TX_MODE_RESET);
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE,
- (val | EMAC_MODE_PORT_GMII));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
- /**
- * Setting this bit causes MAC control frames (except for pause
- * frames) to be passed on for processing. This setting has no
- * affect on the operation of the pause frames. This bit effects
- * all packets regardless of RX Parser packet sorting logic.
- * Turn the PFC off to make sure we are in Xon state before
- * enabling it.
- */
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
- if (CHIP_REV_IS_EMUL(bp)) {
- /* take the BigMac out of reset */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* enable access for bmac registers */
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
- } else
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
vars->mac_type = MAC_TYPE_EMAC;
return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
udelay(30);
/* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
- /**
- * Set Time (based unit is 512 bit time) between automatic
- * re-sending of PP packets amd enable automatic re-send of
- * Per-Priroity Packet as long as pp_gen is asserted and
- * pp_disable is low.
- */
+ /*
+ * Set the time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
val = 0x8000;
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
- wb_data, 2);
+ wb_data, 2);
/* mac control */
val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /* The number of free blocks below which the pause signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
- /* The number of free blocks above which the pause signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
- /* The number of free blocks below which the full signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
- /* The number of free blocks above which the full signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /**
+ /*
* The number of free blocks below which the pause signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
- /**
+ /*
* The number of free blocks above which the pause signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
- /**
+ /*
* The number of free blocks below which the full signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
- /**
+ /*
* The number of free blocks above which the full signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
}
}
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
- /**
+ /*
* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask = REG_RD(bp,
port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK);
- /**
+ /*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
*/
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
- /**
+ /*
* The PFC and pause are orthogonal to one another, meaning when
* PFC is enabled, the pause are disabled, and when PFC is
* disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
/* XGXS control */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
/* tx MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
/* mac control */
val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
}
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
/* configure safc */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- /* fix for emulation */
- if (CHIP_REV_IS_EMUL(bp)) {
- wb_data[0] = 0xf000;
- wb_data[1] = 0;
- REG_WR_DMAE(bp,
- bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
- wb_data, 2);
- }
-
return 0;
}
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
udelay(30);
/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
udelay(30);
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
u32 val;
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
struct bnx2x *bp = params->bp;
REG_WR(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].link_status),
- link_status);
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
if (CHIP_IS_E2(bp)) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
} else {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
}
static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
- u32 line_speed)
+ u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
+/*
+ * bnx2x_get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, port and port-swap
+ * settings. Each phy has a default access mode, which may also be
+ * overridden by the nvram configuration. This parameter, whether it
+ * is the default phy configuration or the nvram override
+ * configuration, is passed here as mdc_mdio_access and selects
+ * the emac_base for the CL45 read/write operations
+ */
static u32 bnx2x_get_emac_base(struct bnx2x *bp,
u32 mdc_mdio_access, u8 port)
{
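The body of bnx2x_get_emac_base() is not shown in this hunk; only its documentation changes here. As an illustration of the selection the comment above describes — with placeholder access-mode values rather than the driver's real nvram/hw-config encodings, and with the port-swap handling reduced to a simple assumption — the choice amounts to returning the EMAC0 or EMAC1 GRC base:

	/* Illustrative sketch only; not the driver's implementation.
	 * Assumes the driver's usual types and GRCBASE_EMAC0, and reuses the
	 * EMAC0 + port*0x400 addressing already visible in this patch. */
	enum mdio_access_sketch { ACC_EMAC0, ACC_EMAC1, ACC_BOTH };

	static u32 emac_base_sketch(enum mdio_access_sketch acc, u8 port,
				    u8 port_swapped)
	{
		u8 eff_port = port_swapped ? (u8)(port ^ 1) : port;

		if (acc == ACC_EMAC0)
			return GRCBASE_EMAC0;
		if (acc == ACC_EMAC1)
			return GRCBASE_EMAC0 + 0x400;
		/* ACC_BOTH: follow the (possibly swapped) port */
		return GRCBASE_EMAC0 + eff_port * 0x400;
	}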
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
}
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
/* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
}
}
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
return rc;
}
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT));
+ EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
}
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the read request on it
*/
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the write request on it
*/
@@ -1573,19 +1542,18 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
offset = phy->addr + ser_lane;
if (CHIP_IS_E2(bp))
- aer_val = 0x2800 + offset - 1;
+ aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0x3800);
}
/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
bnx2x_set_serdes_access(bp, port);
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- port*0x10,
- DEFAULT_PHY_DEV_ADDR);
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
}
static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- params->phy[INT_PHY].def_md_devad);
+ params->phy[INT_PHY].def_md_devad);
}
void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g;
u8 port = params->port;
vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
/* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
- ser_lane = ((params->lane_config &
+ ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- &new_master_ln);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2 ,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- (new_master_ln | ser_lane));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
}
static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
-
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
}
}
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
return -EINVAL;
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
- /* Each two bits represents a lane number:
- No swap is 0123 => 0x1b no need to enable the swap */
+ /*
+ * Each two bits represents a lane number:
+ * No swap is 0123 => 0x1b, so there is no need to enable the swap
+ */
u16 ser_lane, rx_lane_swap, tx_lane_swap;
ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
tx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP,
- (rx_lane_swap |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP,
- (tx_lane_swap |
- MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 control2;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
phy->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
}
}
static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+ struct link_vars *vars,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* CL37 Autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_UCTRL,
- 0xe);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
/* CL73 Autoneg Enabled */
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
/* program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
- /* program speed
- - needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP3, 0x400);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
- /* resolve pause mode and advertisement
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+ /*
+ * Resolve pause mode and advertisement.
+ * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+ */
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- } else {
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- }
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_TX:
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
struct link_params *params,
- u16 ieee_fc)
+ u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
}
static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- &mii_control);
-
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- (mii_control |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
}
}
static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- &control1);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
/* set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- control1);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
/* if forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
} else { /* AN mode */
/* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
break;
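The 4-bit pause_result decoded here is assembled later in this patch from the local and link-partner advertisements: the local pair lands in bits [3:2] and the partner pair in bits [1:0], each ordered <ASYM, PAUSE> as the table above shows. Worked example: if the local device advertises Pause+Asym (11) and the partner advertises Pause only (01), pause_result is 0b1101 = 0xd, which the switch resolves to BNX2X_FLOW_CTRL_BOTH. A stand-alone restatement of the table, for illustration only (plain ints instead of the BNX2X_FLOW_CTRL_* codes):

	/* Restates the decoding table above; illustration only. */
	int resolve_pause_sketch(unsigned int pause_result)
	{
		switch (pause_result & 0xf) {	/* <LD_ASYM,LD_P,LP_ASYM,LP_P> */
		case 0xb:			/* TX only */
			return 1;
		case 0xe:			/* RX only */
			return 2;
		case 0x5:
		case 0x7:
		case 0xd:			/* the worked example above */
		case 0xf:			/* both directions */
			return 3;
		default:			/* no flow control */
			return 0;
		}
	}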
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
u16 pd_10g, status2_1000x;
if (phy->req_line_speed != SPEED_AUTO_NEG)
return 0;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
params->port);
return 1;
}
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
- &pd_10g);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
>> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
pause_result);
}
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_RX0,
- MDIO_RX0_RX_STATUS,
- &rx_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_USTAT1,
- &ustat_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
"ustat_val(0x8371) = 0x%x\n", ustat_val);
return;
}
- /* Step 3: Check CL37 Message Pages received to indicate LP
- supports only CL37 */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_REMOTE_PHY,
- MDIO_REMOTE_PHY_MISC_RX_STATUS,
- &cl37_fsm_recieved);
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_recieved);
if ((cl37_fsm_recieved &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
cl37_fsm_recieved);
return;
}
- /* The combined cl37/cl73 fsm state information indicating that we are
- connected to a device which does not support cl73, but does support
- cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+ /*
+ * The combined cl37/cl73 fsm state information indicates that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg
+ */
+
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
/* Restart CL37 autoneg */
bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed , gp_status;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
/* Read gp_status */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_LP_UP2, &lp_up2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
/* bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, &tx_driver);
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
/* replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, tx_driver);
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
- EMAC_REG_EMAC_MODE,
- (EMAC_MODE_25G_MODE |
- EMAC_MODE_PORT_MII_10M |
- EMAC_MODE_HALF_DUPLEX));
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
- GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
- mode);
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_RX0_RX_EQ_BOOST,
phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER,
phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
- params->loopback_mode == LOOPBACK_EXT)) {
+ params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
/* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisment(phy, params,
- vars->ieee_fc);
+ vars->ieee_fc);
/* enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
}
static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
- struct bnx2x_phy *phy)
+ struct bnx2x_phy *phy,
+ struct link_params *params)
{
u16 cnt, ctrl;
/* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
break;
msleep(1);
}
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
return cnt;
}
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
u32 mask;
struct bnx2x *bp = params->bp;
- /* setting the status to report on link up
- for either XGXS or SerDes */
-
+ /* Setting the status to report on link up for either XGXS or SerDes */
if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
{
u32 latch_status = 0;
- /**
+ /*
* Disable the MI INT ( external phy int ) by writing 1 to the
* status register. Link down indication is high-active-signal,
* so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
/* For all latched-signal=up : Re-Arm Latch signals */
REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ (latch_status & 0xfffe) | (latch_status & 1));
}
/* For all latched-signal=up,Write original_signal to status */
}
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- /* first reset all status
- * we assume only one line will be change at a time */
+ /*
+ * First reset all status; we assume only one line will
+ * change at a time
+ */
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the 10G link interrupt by writing 1 to the
+ * status register
*/
DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
NIG_STATUS_XGXS0_LINK10G);
} else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
+ /*
+ * Disable the link interrupt by writing 1 to the
+ * relevant lane in the status register
*/
u32 ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the link interrupt by writing 1 to the status
+ * register
*/
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
}
if ((params->num_phys == MAX_PHYS) &&
(params->phy[EXT_PHY2].ver_addr != 0)) {
- spirom_ver = REG_RD(bp,
- params->phy[EXT_PHY2].ver_addr);
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
if (params->phy[EXT_PHY2].format_fw_ver) {
*ver_p = '/';
ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
/* change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ port*0x18));
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
msleep(200);
/* set aer mmd back */
bnx2x_set_aer_mmd_xgxs(params, phy);
/* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
-
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,56 +3134,71 @@ u8 bnx2x_set_led(struct link_params *params,
case LED_MODE_OFF:
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
+ SHARED_HW_CFG_LED_MAC1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
- /**
+ /*
* For all other phys, OPER mode is same as ON, so in case
* link is down, do nothing
- **/
+ */
if (!vars->link_up)
break;
case LED_MODE_ON:
- if (SINGLE_MEDIA_DIRECT(params)) {
- /**
- * This is a work-around for HW issue found when link
- * is up in CL73
- */
+ if (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /*
+ * This is a work-around for E2+8727 Configurations
+ */
+ if (mode == LED_MODE_ON ||
+ speed == SPEED_10000) {
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
} else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
}
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
+ LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
+ port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
if (CHIP_IS_E1(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
(speed == SPEED_100) ||
(speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
+ /*
+ * On Everest 1 Ax chip versions, for speeds less than
+ * 10G the LED scheme is different
+ */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
+ + port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
+ port*4, 0);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ port*4, 1);
}
break;
@@ -3215,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
}
-/**
+/*
* This function comes to reflect the actual link state read DIRECTLY from the
* HW
*/
@@ -3227,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
- CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
/* link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
@@ -3274,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
u8 rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
- /**
- * In case of external phy existence, the line speed would be the
- * line speed linked up by the external phy. In case it is direct
- * only, then the line_speed during initialization will be
- * equal to the req_line_speed
- */
+ /*
+ * In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
vars->line_speed = params->phy[INT_PHY].req_line_speed;
- /**
+ /*
* Initialize the internal phy in case this is a direct board
* (no external phys), or this board has external phy which requires
* to first.
@@ -3310,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
if (!non_ext_phy)
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- /**
+ /*
* No need to initialize second phy in case of first
* phy only selection. In case of second phy, we do
* need to initialize the first phy, since they are
* connected.
- **/
+ */
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Not initializing"
- "second phy\n");
+ DP(NETIF_MSG_LINK, "Ignoring second phy\n");
continue;
}
params->phy[phy_index].config_init(
@@ -3342,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
/* reset the SerDes/XGXS */
- REG_WR(params->bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (params->port*16)));
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
}
static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3358,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
@@ -3393,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
/* reset BigMac */
bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
return 0;
}
@@ -3446,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
msleep(20);
return rc;
}
-/**
+/*
* The bnx2x_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -3485,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
+ port*0x18) > 0);
DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3499,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
/* disable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- /**
- * Step 1:
- * Check external link change only for external phys, and apply
- * priority selection between them in case the link on both phys
- * is up. Note that the instead of the common vars, a temporary
- * vars argument is used since each phy may have different link/
- * speed/duplex result
- */
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3531,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
switch (bnx2x_phy_selection(params)) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through itself only.
* Its not clear how to reset the link on the second phy
- **/
+ */
active_external_phy = EXT_PHY1;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through the second PHY.
- **/
+ */
active_external_phy = EXT_PHY2;
break;
default:
- /**
+ /*
* Link indication on both PHYs with the following cases
* is invalid:
* - FIRST_PHY means that second phy wasn't initialized,
@@ -3554,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
* - DEFAULT should be overriden during initialiazation
- **/
+ */
DP(NETIF_MSG_LINK, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!\n",
params->multi_phy_config);
@@ -3564,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
}
}
prev_line_speed = vars->line_speed;
- /**
- * Step 2:
- * Read the status of the internal phy. In case of
- * DIRECT_SINGLE_MEDIA board, this link is the external link,
- * otherwise this is the link between the 577xx and the first
- * external phy
- */
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
if (params->phy[INT_PHY].read_status)
params->phy[INT_PHY].read_status(
&params->phy[INT_PHY],
params, vars);
- /**
+ /*
* The INT_PHY flow control reside in the vars. This include the
* case where the speed or flow control are not set to AUTO.
* Otherwise, the active external phy flow control result is set
@@ -3585,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
*/
if (active_external_phy > INT_PHY) {
vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
- /**
+ /*
* Link speed is taken from the XGXS. AN and FC result from
* the external phy.
*/
vars->link_status |= phy_vars[active_external_phy].link_status;
- /**
+ /*
* if active_external_phy is first PHY and link is up - disable
* disable TX on second external PHY
*/
@@ -3627,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
" ext_phy_line_speed = %d\n", vars->flow_ctrl,
vars->link_status, ext_phy_line_speed);
- /**
+ /*
* Upon link speed change set the NIG into drain mode. Comes to
* deals with possible FIFO glitch due to clk change when speed
* is decreased without link down indicator
@@ -3642,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
ext_phy_line_speed);
vars->phy_link_up = 0;
} else if (prev_line_speed != vars->line_speed) {
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
msleep(1);
}
}
@@ -3658,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
- /**
- * In case external phy link is up, and internal link is down
- * (not initialized yet probably after link initialization, it
- * needs to be initialized.
- * Note that after link down-up as result of cable plug, the xgxs
- * link would probably become up again without the need
- * initialize it
- */
+ /*
+ * In case the external phy link is up and the internal link is
+ * down (probably not initialized yet, e.g. right after link
+ * initialization), the internal link needs to be initialized.
+ * Note that after a link down-up caused by a cable plug, the xgxs
+ * link would probably become up again without the need to
+ * initialize it
+ */
if (!(SINGLE_MEDIA_DIRECT(params))) {
DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
" init_preceding = %d\n", ext_phy_link_up,
@@ -3685,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars);
}
}
- /**
- * Link is up only if both local phy and external phy (in case of
- * non-direct board) are up
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
*/
vars->link_up = (vars->phy_link_up &&
(ext_phy_link_up ||
@@ -3708,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3731,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
u16 fw_ver1, fw_ver2;
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
phy->ver_addr);
}
@@ -3754,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3785,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
pause_result = (ld_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
@@ -3854,90 +3847,82 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
pause_result);
}
}
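A minimal standalone sketch of the pause-resolution step above: the local and
link-partner pause advertisement bits (assumed to sit in bits 10-11, as
MDIO_AN_REG_ADV_PAUSE_MASK suggests) are packed into a 4-bit pause_result,
local in bits [3:2] and partner in bits [1:0]. The mask value and main() are
illustrative assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE_MASK 0x0C00	/* assumed PAUSE/ASM_DIR advertisement bits */

static uint8_t resolve_pause_nibble(uint16_t ld_pause, uint16_t lp_pause)
{
	uint8_t pause_result;

	/* local advertisement -> bits [3:2], partner -> bits [1:0] */
	pause_result = (uint8_t)((ld_pause & ADV_PAUSE_MASK) >> 8);
	pause_result |= (uint8_t)((lp_pause & ADV_PAUSE_MASK) >> 10);
	return pause_result;
}

int main(void)
{
	/* both sides advertise symmetric pause -> result 0x5 */
	printf("pause_result = 0x%x\n", resolve_pause_nibble(0x0400, 0x0400));
	return 0;
}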
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
struct bnx2x_phy *phy,
u8 port)
{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ u8 rc = 0;
+
/* Boot port from external ROM */
/* EDC grst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
/* ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 120ms for code download via SPI port */
- msleep(120);
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
- /* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
+ /* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
- struct bnx2x_phy *phy)
-{
- u16 val;
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- if (val == 0) {
- /* Mustn't set low power mode in 8073 A0 */
- return;
- }
+ msleep(1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
- /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
- bnx2x_cl45_read(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val &= ~(1<<13);
+ /* Clear ser_boot_ctl bit */
bnx2x_cl45_write(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
- /* PLL controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
- /* Tx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
- /* Rx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
- /* Enable PLL sequencer (use read-modify-write to set bit 13) */
- bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val |= (1<<13);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+ return rc;
}
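The reworked ROM boot above replaces a fixed sleep with a bounded poll on the
firmware version register. A compilable sketch of that retry pattern follows;
read_fw_ver() is a stand-in for the MDIO read, and the 0x4321 sentinel mirrors
the "still booting" value checked in the hunk.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the CL45 read of the ROM version register */
static uint16_t read_fw_ver(unsigned int iteration)
{
	return (iteration < 5) ? 0 : 0x0102;	/* "valid" after a few polls */
}

static int wait_for_fw_download(void)
{
	unsigned int count = 0;
	uint16_t fw_ver;

	do {
		if (++count > 300)
			return -1;	/* download failed / timed out */
		fw_ver = read_fw_ver(count);
		/* a real driver would sleep ~1ms between polls here */
	} while (fw_ver == 0 || fw_ver == 0x4321);

	printf("Download complete, fw version = 0x%x\n", fw_ver);
	return 0;
}

int main(void)
{
	return wait_for_fw_download() ? 1 : 0;
}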
/******************************************************************/
@@ -3950,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
/* Read 8073 HW revision*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val != 1) {
/* No need to workaround in 8073 A1 */
@@ -3959,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
}
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
/* SNR should be applied only for version 0x102 */
if (val != 0x102)
@@ -3974,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt, cnt1 ;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val > 0) {
/* No need to workaround in 8073 A1 */
@@ -3983,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
}
/* XAUI workaround in 8073 A0: */
- /* After loading the boot ROM and restarting Autoneg,
- poll Dev1, Reg $C820: */
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
for (cnt = 0; cnt < 1000; cnt++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &val);
- /* If bit [14] = 0 or bit [13] = 0, continue on with
- system initialization (XAUI work-around not required,
- as these bits indicate 2.5G or 1G link up). */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
if (!(val & (1<<14)) || !(val & (1<<13))) {
DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
return 0;
} else if (!(val & (1<<15))) {
- DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
- /* If bit 15 is 0, then poll Dev1, Reg $C841 until
- it's MSB (bit 15) goes to 1 (indicating that the
- XAUI workaround has completed),
- then continue on with system initialization.*/
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+ * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+ * MSB (bit 15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -4085,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
gpio_port = params->port;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
/* enable LASI */
bnx2x_cl45_write(bp, phy,
@@ -4098,8 +4089,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_8073_set_pause_cl37(params, phy, vars);
- bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
@@ -4108,6 +4097,21 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -4135,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
val = (1<<7);
} else if (phy->req_line_speed == SPEED_2500) {
val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
+ /*
+ * Note that 2.5G works only when used with 1G
+ * advertisement
+ */
} else
val = (1<<5);
} else {
@@ -4145,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
- /* Note that 2.5G works only when
- used with 1G advertisment */
+ /* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4186,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
+ /*
+ * The SNR will improve by about 2 dB by changing the BW and FFE main
+ * tap. The remaining commands are executed after link is up.
+ * Change FFE main cursor to 5 in the EDC register
+ */
if (bnx2x_8073_is_snr_needed(bp, phy))
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4272,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
+ /*
+ * The SNR will improve by about 2 dB by changing the BW and FFE main
+ * tap. The 1st write to change the FFE main tap is done before
+ * restarting AN. Change PLL Bandwidth in the EDC register
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
0x26BC);
@@ -4314,8 +4320,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
}
if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it is in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+ "the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
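For the 1G/10G polarity swap handled above, the whole fix-up is a single bit in
the Rx control word: set in 1G mode, cleared in 10G mode. A small
self-contained sketch of that bit manipulation (bit position assumed from the
hunk, no MDIO access):

#include <stdint.h>
#include <stdio.h>

#define RX_INVERT_1G	(1 << 3)	/* assumed bit position */

static uint16_t adjust_rx_polarity(uint16_t rx_ctrl, unsigned int line_speed)
{
	if (line_speed == 1000)
		rx_ctrl |= RX_INVERT_1G;	/* invert Rx in 1G mode */
	else
		rx_ctrl &= ~RX_INVERT_1G;	/* normal polarity in 10G mode */
	return rx_ctrl;
}

int main(void)
{
	printf("1G : 0x%04x\n", adjust_rx_polarity(0x0000, 1000));
	printf("10G: 0x%04x\n", adjust_rx_polarity(0x0008, 10000));
	return 0;
}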
@@ -4332,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
}
/******************************************************************/
@@ -4347,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "init 8705\n");
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4402,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* SFP+ module Section */
/******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
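The helper added above derives the effective GPIO port from the port-swap
straps; because the logical && yields 0 or 1, the XOR flips the port only when
both the swap strap and its override are set. A tiny illustration with made-up
values:

#include <stdint.h>
#include <stdio.h>

static uint8_t effective_gpio_port(uint8_t port, uint32_t swap_val,
				   uint32_t swap_override)
{
	return port ^ (swap_val && swap_override);
}

int main(void)
{
	printf("%u\n", effective_gpio_port(0, 1, 1));	/* 1: swapped */
	printf("%u\n", effective_gpio_port(0, 1, 0));	/* 0: override not set */
	printf("%u\n", effective_gpio_port(1, 0, 1));	/* 1: swap strap not set */
	return 0;
}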
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
struct bnx2x_phy *phy,
- u8 port,
u8 tx_en)
{
u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
- DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
- tx_en, port);
/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val);
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
- if (tx_en)
- val &= ~(1<<15);
- else
- val |= (1<<15);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val);
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
}
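The rewritten transmitter control above dispatches on a TX-laser mode read from
shmem: either toggle a bit over MDIO or drive one of four GPIO pins, with the
pin index recovered by subtracting the GPIO0 enumerator. A standalone sketch of
that dispatch; the enum values and printouts are illustrative assumptions.

#include <stdio.h>

enum tx_laser_mode {
	TX_LASER_MDIO,
	TX_LASER_GPIO0,
	TX_LASER_GPIO1,
	TX_LASER_GPIO2,
	TX_LASER_GPIO3,
};

static void set_transmitter(enum tx_laser_mode mode, int tx_en)
{
	switch (mode) {
	case TX_LASER_MDIO:
		/* a driver would clear/set bit 15 of the PHY identifier reg */
		printf("MDIO: %s TX laser\n", tx_en ? "enable" : "disable");
		break;
	case TX_LASER_GPIO0:
	case TX_LASER_GPIO1:
	case TX_LASER_GPIO2:
	case TX_LASER_GPIO3:
		printf("GPIO pin %d -> %s\n", mode - TX_LASER_GPIO0,
		       tx_en ? "high" : "low");
		break;
	default:
		printf("invalid TX laser mode %d\n", mode);
		break;
	}
}

int main(void)
{
	set_transmitter(TX_LASER_MDIO, 1);
	set_transmitter(TX_LASER_GPIO2, 0);
	return 0;
}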
static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -4443,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | 0xa000));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ addr);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x2c0f);
+ 0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4477,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4496,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -4509,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- ((byte_cnt < 2) ? 2 : byte_cnt));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
/* Set the destination address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- 0x8004,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x8002);
- /* Wait appropriate time for two-wire command to finish before
- polling the status register */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
msleep(1);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4555,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"Got bad status 0x%x when reading from SFP+ EEPROM\n",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
- return -EINVAL;
+ return -EFAULT;
}
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4579,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
return -EINVAL;
}
static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
struct link_params *params,
- u16 *edc_mode)
+ u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
u8 val, check_limiting_mode = 0;
@@ -4615,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
u8 copper_module_type;
- /* Check if its active cable( includes SFP+ module)
- of passive cable*/
+ /*
+ * Check if it is an active cable (includes SFP+ module)
+ * or a passive cable
+ */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4675,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-/* This function read the relevant field from the module ( SFP+ ),
- and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+) and
+ * verifies that it is compliant with this board
+ */
static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4725,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_VENDOR_NAME_ADDR,
- SFP_EEPROM_VENDOR_NAME_SIZE,
- (u8 *)vendor_name))
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_PART_NO_ADDR,
- SFP_EEPROM_PART_NO_SIZE,
- (u8 *)vendor_pn))
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
vendor_pn[0] = '\0';
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
- " Port %d from %s part number %s\n",
- params->port, vendor_name, vendor_pn);
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
@@ -4754,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
u8 val;
struct bnx2x *bp = params->bp;
u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+ * some phy types (e.g. JDSU)
+ */
+
for (timeout = 0; timeout < 60; timeout++) {
if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
== 0) {
@@ -4774,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
/* Make sure GPIOs are not using for LED mode */
u16 val;
/*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
* operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
* output
* Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
* Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
* where the 1st bit is the over-current(only input), and 2nd bit is
* for power( only output )
- */
-
- /*
+ *
* In case of NOC feature is disabled and power is up, set GPIO control
* as input to enable listening of over-current indication
*/
@@ -4812,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
u16 cur_limiting_mode;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &cur_limiting_mode);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
cur_limiting_mode);
if (edc_mode == EDC_MODE_LIMITING) {
- DP(NETIF_MSG_LINK,
- "Setting LIMITING MODE\n");
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
@@ -4829,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
- /* Changing to LRM mode takes quite few seconds.
- So do it only if current mode is limiting
- ( default is LRM )*/
+ /*
+ * Changing to LRM mode takes quite a few seconds, so do it only
+ * if the current mode is limiting (the default is LRM)
+ */
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- 0x128);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL0,
- 0x4008);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0xaaaa);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
}
return 0;
}
static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
struct bnx2x_phy *phy,
- u16 edc_mode)
+ u16 edc_mode)
{
u16 phy_identifier;
u16 rom_ver2_val;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &phy_identifier);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier & ~(1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &rom_ver2_val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier | (1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
return 0;
}
@@ -4897,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
switch (action) {
case DISABLE_TX:
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
break;
case ENABLE_TX:
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4910,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
}
}
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4927,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(phy, params) !=
- 0) {
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4946,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
}
} else {
/* Turn off fault module-detected led */
- DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_LOW,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
}
/* power up the SFP module */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
bnx2x_8727_power_module(bp, phy, 1);
- /* Check and set limiting mode / LRM mode on 8726.
- On 8727 it is done automatically */
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
@@ -4969,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
else
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
return rc;
}
@@ -4984,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
u8 port = params->port;
/* Set valid module led off */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
- /* Get current gpio val refelecting module plugged in / out*/
+ /* Get current gpio val reflecting module plugged in/out */
gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
@@ -5004,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
port);
- /* Module was plugged out. */
- /* Disable transmit for this module */
+ /*
+ * Module was plugged out.
+ * Disable transmit for this module
+ */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -5051,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
" link_status 0x%x\n", rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- * 1 is set
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
*/
link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
if (link_up) {
@@ -5062,6 +5173,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
else
vars->line_speed = SPEED_10000;
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
@@ -5073,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 cnt, val;
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait until fw is loaded */
for (cnt = 0; cnt < 100; cnt++) {
@@ -5147,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
0x0004);
}
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+ * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
return 0;
}
@@ -5181,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
/* Set soft reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
msleep(200);
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5235,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
u32 val;
u32 swap_val, swap_override, aeu_gpio_mask, offset;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_8726_external_rom_boot(phy, params);
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
+ /*
+ * Need to call module detection on initialization since the module
+ * detection triggered by actual module insertion might occur before
+ * the driver is loaded; when the driver is loaded, it resets all
+ * registers, including the transmitter
+ */
bnx2x_sfp_module_detection(phy, params);
if (phy->req_line_speed == SPEED_1000) {
@@ -5284,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
bnx2x_cl45_write(bp, phy,
@@ -5317,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
/* Set GPIO3 to trigger SFP+ module insertion/removal */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
/* The GPIO should be swapped if the swap register is set and active */
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5408,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
u32 swap_val, swap_override;
u8 port;
- /**
+ /*
* The PHY reset is controlled by GPIO 1. Fake the port number
* to cancel the swap done in set_gpio()
*/
@@ -5417,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
port = (swap_val && swap_override) ^ 1;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 tmp1, val, mod_abs;
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
u16 rx_alarm_ctrl_val;
u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
lasi_ctrl_val = 0x0004;
@@ -5443,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
+ /*
+ * Initially configure MOD_ABS to interrupt when a module is
+ * present (bit 8)
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
@@ -5465,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
if (phy->flags & FLAGS_NOC)
val |= (3<<5);
- /**
+ /*
* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
* status which reflect SFP+ module over-current
*/
@@ -5492,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /**
+ /*
* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -5518,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
} else {
- /**
+ /*
* Since the 8727 has only single reset pin, need to set the 10G
* registers although it is default
*/
@@ -5534,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
0x0008);
}
- /* Set 2-wire transfer rate of SFP+ module EEPROM
+ /*
+ * Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
*/
@@ -5557,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
phy->tx_preemphasis[1]);
}
+ /*
+ * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
return 0;
}
@@ -5570,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
port_feature_config[params->port].
config));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
/* Module is absent */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is absent\n");
- /* 1. Set mod_abs to detect next module
- presence event
- 2. Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed */
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
} else {
/* Module is present */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is present\n");
- /* First thing, disable transmitter,
- and if the module is ok, the
- module_detection will enable it*/
-
- /* 1. Set mod_abs to detect next module
- absent event ( bit 8)
- 2. Restore the default polarity of the OPRXLOS signal and
- this signal will then correctly indicate the presence or
- absence of the Rx signal. (bit 9) */
+ /*
+ * First disable the transmitter; if the module is ok, the
+ * module_detection will enable it.
+ * 1. Set mod_abs to detect the next module-absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal; this
+ * signal will then correctly indicate the presence or
+ * absence of the Rx signal (bit 9)
+ */
mod_abs |= (1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs |= (1<<9);
@@ -5617,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed. This is need to be done
- before calling the module detection, otherwise it will clear
- the link update alarm */
+ /*
+ * Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This needs to be done before calling the
+ * module detection, otherwise it will clear the link update
+ * alarm
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5628,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
bnx2x_sfp_module_detection(phy, params);
@@ -5637,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
- /* No need to check link status in case of
- module plugged in/out */
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
}
static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5675,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /**
+ /*
* If a module is present and there is need to check
* for over current
*/
@@ -5695,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
" Please remove the SFP+ module and"
" restart the system to clear this"
" error.\n",
- params->port);
-
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
+ params->port);
+ /* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5743,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
+ /*
+ * Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
link_up = 1;
vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
link_up = 1;
vars->line_speed = SPEED_1000;
@@ -5758,15 +5917,18 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "port %x: External link is down\n",
params->port);
}
- if (link_up)
+ if (link_up) {
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
if ((DUAL_MEDIA(params)) &&
(phy->req_line_speed == SPEED_1000)) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_PCS_GP, &val1);
- /**
+ /*
* In case of dual-media board and 1G, power up the XAUI side,
* otherwise power it down. For 10G it is done automatically
*/
@@ -5786,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
/* Disable Transmitter */
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
/* Clear LASI */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
@@ -5798,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct link_params *params)
{
- u16 val, fw_ver1, fw_ver2, cnt;
+ u16 val, fw_ver1, fw_ver2, cnt, adj;
struct bnx2x *bp = params->bp;
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5824,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5841,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
}
/* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
/* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
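The 848xx SPIROM-version change above introduces an "adj" offset so the same
MDIO2ARM sequence also works on the 84833, whose registers sit one address
lower. A compilable sketch of that address-adjustment pattern (constants are
placeholders, not the real register map):

#include <stdint.h>
#include <stdio.h>

enum phy_flavour { PHY_84823, PHY_84833 };

#define SPI_CMD_REG_BASE 0xA819		/* placeholder base address */

static uint16_t spi_cmd_reg(enum phy_flavour flavour)
{
	int adj = (flavour == PHY_84833) ? -1 : 0;

	return (uint16_t)(SPI_CMD_REG_BASE + adj);
}

int main(void)
{
	printf("84823 -> 0x%04X\n", spi_cmd_reg(PHY_84823));
	printf("84833 -> 0x%04X\n", spi_cmd_reg(PHY_84833));
	return 0;
}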
@@ -5852,33 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, adj;
+
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
val &= 0xFE00;
val |= 0x0092;
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
+ MDIO_PMA_REG_8481_LED1_MASK + adj,
0x80);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
+ MDIO_PMA_REG_8481_LED2_MASK + adj,
0x18);
+ /* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0040);
+ MDIO_PMA_REG_8481_LED3_MASK + adj,
+ 0x0006);
+
+ /* Select the closest activity blink rate to that in 10/100/1000 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK + adj,
+ 0);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3 */
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -5892,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
-
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of clear-on-read
+ */
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -6017,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6033,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
- u16 val;
+ u16 val, adj;
u16 temp;
- u32 actual_phy_selection;
+ u32 actual_phy_selection, cms_enable;
u8 rc = 0;
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = 3;
msleep(1);
if (CHIP_IS_E2(bp))
@@ -6048,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
- /* BCM84823 requires that XGXS links up first @ 10G for normal
- behavior */
+ /*
+ * BCM84823 requires that the XGXS link come up first at 10G for normal behavior
+ */
temp = vars->line_speed;
vars->line_speed = SPEED_10000;
bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6062,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Set dual-media configuration according to configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, &val);
+ MDIO_CTL_REG_84823_MEDIA + adj, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6095,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, val);
+ MDIO_CTL_REG_84823_MEDIA + adj, val);
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
@@ -6103,29 +6297,50 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
bnx2x_save_848xx_spirom_version(phy, params);
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val, val1, val2;
+ u16 val, val1, val2, adj;
u8 link_up = 0;
+ /* Reg offset adjustment for 84833 */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, 0xFFFA, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
&val2);
DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
/* Check link 10G */
if (val2 & (1<<11)) {
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
link_up = 1;
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
} else { /* Check Legacy speed link */
@@ -6203,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
}
static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6227,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
else
port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6283,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
} else {
bnx2x_cl45_write(bp, phy,
@@ -6324,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
val |= 0x2492;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
}
break;
@@ -6370,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
&val);
if (!((val &
- MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
- >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
- DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6381,30 +6596,42 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x10);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x80);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x98);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x40);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
} else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x80);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
}
break;
}
@@ -6431,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6481,14 +6708,13 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
+ /* if link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
&val2);
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
val2, (val2 & (1<<14)));
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -6516,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
for (cnt = 0; cnt < 10; cnt++) {
msleep(50);
/* Writes a self-clearing reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
/* Wait for clear */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
if ((val & (1<<15)) == 0)
break;
@@ -6540,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
/* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
/* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
}
static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6585,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
.supported = 0,
.media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)NULL,
@@ -6622,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_serdes,
@@ -6659,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6690,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
.media_type = ETH_PHY_BASE_T,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6721,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_8073_config_init,
@@ -6932,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
/*****************************************************************/
/* */
/* Populate the phy according. Main function: bnx2x_populate_phy */
@@ -6945,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
/* Get the 4 lanes xgxs config rx and tx */
u32 rx = 0, tx = 0, i;
for (i = 0; i < 2; i++) {
- /**
+ /*
* INT_PHY and EXT_PHY1 share the same value location in the
* shmem. When num_phys is greater than 1, then this value
* applies only to EXT_PHY1
@@ -6953,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
}
phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7085,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
*phy = phy_84823;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
break;
@@ -7099,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- /**
- * The shmem address of the phy version is located on different
- * structures. In case this structure is too old, do not set
- * the address
- */
+ /*
+ * The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
dev_info.shared_hw_config.config2));
if (phy_index == EXT_PHY1) {
phy->ver_addr = shmem_base + offsetof(struct shmem_region,
port_mb[port].ext_phy_fw_version);
- /* Check specific mdc mdio settings */
- if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
- mdc_mdio_access = config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
} else {
u32 size = REG_RD(bp, shmem2_base);
@@ -7132,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- /**
+ /*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
* to prevent one port interfere with another port's CL45 operations.
@@ -7167,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
/* Populate the default phy configuration for MF mode */
if (phy_index == EXT_PHY2) {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config2));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region,
+ dev_info.
port_hw_config[params->port].speed_capability_mask2));
} else {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_hw_config[params->port].speed_capability_mask));
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
}
DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
" 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7325,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
else if (phy_index == EXT_PHY2)
actual_phy_idx = EXT_PHY1;
}
- params->phy[actual_phy_idx].req_flow_ctrl =
+ params->phy[actual_phy_idx].req_flow_ctrl =
params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
@@ -7378,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
set_phy_vars(params);
DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
- if (CHIP_REV_IS_FPGA(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
- /* enable on E1.5 FPGA */
- if (CHIP_IS_E1H(bp)) {
- vars->flow_ctrl |=
- (BNX2X_FLOW_CTRL_TX |
- BNX2X_FLOW_CTRL_RX);
- vars->link_status |=
- (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
- }
-
- bnx2x_emac_enable(params, vars, 0);
- if (!(CHIP_IS_E2(bp)))
- bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
- if (CHIP_REV_IS_EMUL(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
- bnx2x_bmac_enable(params, vars, 0);
-
- bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
if (params->loopback_mode == LOOPBACK_BMAC) {
vars->link_up = 1;
@@ -7444,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if (params->loopback_mode == LOOPBACK_EMAC) {
@@ -7461,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7485,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
} else
- bnx2x_bmac_enable(params, vars, 0);
-
+ bnx2x_bmac_enable(params, vars, 0);
if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
params->phy[INT_PHY].config_loopback(
@@ -7504,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params);
}
}
-
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
@@ -7525,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
return 0;
}
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy)
+ u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7534,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7605,10 +7817,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
- s8 port;
+ s8 port = 0;
s8 port_of_path = 0;
-
- bnx2x_ext_phy_hw_reset(bp, 0);
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7633,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* Need to take the phy out of low power mode in order
to access its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7663,7 +7879,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
@@ -7671,34 +7886,26 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
-
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8073_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
/* Only set bit 10 = 1 (Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN,
- (val | 1<<10));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
}
- /* Toggle Transmitter: Power down and then up with 600ms
- delay between */
+ /*
+ * Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
msleep(600);
/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7706,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
/* set GPIO2 back to LOW */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
}
@@ -7771,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
/* Set fault module detected LED on */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
}
return 0;
}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
- s8 port;
+ s8 port, reset_gpio;
u32 swap_val, swap_override;
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
s8 port_of_path;
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ reset_gpio = MISC_REGISTERS_GPIO_1;
port = 1;
- bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+ /*
+ * Retrieve the reset gpio/port which control the reset.
+ * Default is GPIO1, PORT1
+ */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
/* Calculate the port based on port swap */
port ^= (swap_val && swap_override);
+ /* Initiate PHY reset*/
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
msleep(5);
/* PART1 - Reset both phys */
@@ -7832,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7848,27 +8111,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
- if (CHIP_IS_E2(bp))
+ if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8727_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
- }
+ }
return 0;
}
@@ -7893,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- /* GPIO1 affects both ports, so there's need to pull
- it for single port alone */
+ /*
+ * GPIO1 affects both ports, so there's a need to pull
+ * it for a single port alone
+ */
rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
@@ -7904,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
default:
DP(NETIF_MSG_LINK,
- "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
- ext_phy_type);
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
break;
}
+ if (rc != 0)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ 0);
return rc;
}
@@ -7916,12 +8175,20 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id)
{
u8 rc = 0;
+ u32 phy_ver;
u8 phy_index;
u32 ext_phy_type, ext_phy_config;
DP(NETIF_MSG_LINK, "Begin common phy init\n");
- if (CHIP_REV_IS_EMUL(bp))
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
return 0;
+ }
/* Read the ext_phy_type for arbitrary port(0) */
for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c4..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
-#define SPEED_AUTO_NEG 0
+#define SPEED_AUTO_NEG 0
#define SPEED_12000 12000
#define SPEED_12500 12500
#define SPEED_13000 13000
@@ -44,8 +44,8 @@
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
-#define SFP_EEPROM_PART_NO_ADDR 0x28
-#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
@@ -201,12 +201,14 @@ struct link_params {
/* Default / User Configuration */
u8 loopback_mode;
-#define LOOPBACK_NONE 0
-#define LOOPBACK_EMAC 1
-#define LOOPBACK_BMAC 2
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
-#define LOOPBACK_EXT 5
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
/* Phy register parameter */
u32 chip_id;
+ /* features */
u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
+
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
- const u32 cos1_bw);
+ const u32 cos1_bw);
/* Used to configure the ETS to strict */
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
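
The header now exports bnx2x_read_sfp_module_eeprom() together with the SFP_EEPROM_* offsets above. A hedged usage sketch follows; the caller name is hypothetical and a non-zero return value is assumed to indicate failure:

	static void example_print_sfp_part_no(struct bnx2x_phy *phy,
					      struct link_params *params)
	{
		struct bnx2x *bp = params->bp;
		u8 part_no[SFP_EEPROM_PART_NO_SIZE + 1];

		if (bnx2x_read_sfp_module_eeprom(phy, params,
						 SFP_EEPROM_PART_NO_ADDR,
						 SFP_EEPROM_PART_NO_SIZE,
						 part_no) != 0)
			return;	/* module absent or EEPROM not readable */

		part_no[SFP_EEPROM_PART_NO_SIZE] = '\0';
		DP(NETIF_MSG_LINK, "SFP+ part number %s\n", part_no);
	}
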
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1e..ae8d20a2b4fc 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -5296,10 +5296,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
}
}
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
-
bnx2x_setup_fan_failure_detection(bp);
/* clear PXP2 attentions */
@@ -5503,9 +5499,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -6463,12 +6456,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
/* Send a SET_MAC ramrod */
- bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+ bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
cam_offset, 0);
- bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
return 0;
}
@@ -8379,13 +8373,60 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /*
+ * Check if hw lock is required to access MDC/MDIO bus to the PHY(s).
+ * In MF mode, it is set to cover self-test cases
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
}
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /* If the maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8398,7 +8439,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI NPAR MAC */
+ /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+ * MAC is missing, the corresponding feature should be disabled.
+ */
if (IS_MF_SI(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8406,8 +8449,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
- }
+ BNX2X_DEV_INFO("Read iSCSI MAC: "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ /* Disable iSCSI OOO if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_OOO_FLAG |
+ NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ BNX2X_DEV_INFO("Read FCoE MAC to "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_FCOE_FLAG;
}
#endif
} else {
@@ -8421,7 +8495,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
#endif
}
@@ -8429,14 +8503,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Inform the upper layers about FCoE MAC */
+ /* Set the FCoE MAC in modes other than MF_SI */
if (!CHIP_IS_E1x(bp)) {
if (IS_MF_SD(bp))
- memcpy(bp->fip_mac, bp->dev->dev_addr,
- sizeof(bp->fip_mac));
- else
- memcpy(bp->fip_mac, bp->iscsi_mac,
- sizeof(bp->fip_mac));
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ else if (!IS_MF(bp))
+ memcpy(fip_mac, iscsi_mac, ETH_ALEN);
}
#endif
}
@@ -8599,6 +8671,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
return rc;
}
@@ -9862,7 +9938,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
int rc = 0;
mutex_lock(&bp->cnic_mutex);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
if (c_ops)
rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
mutex_unlock(&bp->cnic_mutex);
@@ -10072,6 +10149,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate to CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
cp->drv_owner = THIS_MODULE;
cp->chip_id = CHIP_ID(bp);
cp->pdev = bp->pdev;
@@ -10092,6 +10176,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
"starting cid %d\n",
cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d61..1c89f19a4425 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
@@ -6194,7 +6195,11 @@ The other bits are reserved and should be zero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
#define IGU_FUNC_BASE 0x0400
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e2bb39..1024ae158227 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
if (!(dev->flags & IFF_MASTER))
goto out;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
goto out;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c65129..5c6fba802f2b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
goto out;
}
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
goto out;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf1..1df9f0ea9184 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1372,8 +1372,8 @@ static int bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- unsigned long features = bond_dev->features;
- unsigned long vlan_features = 0;
+ u32 features = bond_dev->features;
+ u32 vlan_features = 0;
unsigned short max_hard_header_len = max((u16)ETH_HLEN,
bond_dev->hard_header_len);
int i;
@@ -1400,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(features, NULL);
- bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+ bond_dev->features = netdev_fix_features(bond_dev, features);
+ bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (!slave || !slave_do_arp_validate(bond, slave))
goto out_unlock;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out_unlock;
+
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto out_unlock;
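
The same two-line fix lands in all three bonding receive hooks above (bond_3ad, bond_alb, bond_main): claim ownership of a possibly shared skb before pskb_may_pull() is allowed to modify it. A minimal sketch of the pattern, with illustrative names only:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_arp.h>

	static int example_pkt_recv(struct sk_buff *skb, struct net_device *dev,
				    struct packet_type *pt,
				    struct net_device *orig_dev)
	{
		/* a shared skb must not be modified; get a private copy first */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;	/* clone failed, nothing left to free */

		/* only now is it safe to pull and parse the header */
		if (!pskb_may_pull(skb, arp_hdr_len(dev)))
			goto out;

		/* ... inspect arp_hdr(skb) here ... */
	out:
		kfree_skb(skb);		/* kfree_skb() tolerates NULL */
		return NET_RX_SUCCESS;
	}
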
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c5380..72bb0f6cc9bf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
bond->dev->name, new_value);
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);
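
Both sysfs hunks above make the store callbacks return 'ret', which carries a negative errno on the error paths, instead of unconditionally returning 'count'. A hedged sketch of the corrected pattern (names are illustrative):

	static ssize_t example_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int new_value;
		ssize_t ret = count;	/* success: consume the whole write */

		if (sscanf(buf, "%d", &new_value) != 1) {
			ret = -EINVAL;	/* now propagated to the caller */
			goto out;
		}
		/* ... validate and apply new_value ... */
	out:
		return ret;
	}
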
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade9..5dec456fd4a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
As only the sending and receiving of CAN frames is implemented, this
driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -117,6 +117,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/usb/Kconfig"
+source "drivers/net/can/softing/Kconfig"
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f9..53c82a71778e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
obj-y += usb/
+obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..2532b9631538 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -40,22 +41,23 @@
#include <mach/board.h>
-#define AT91_NAPI_WEIGHT 12
+#define AT91_NAPI_WEIGHT 11
/*
* RX/TX Mailbox split
* don't dare to touch
*/
-#define AT91_MB_RX_NUM 12
+#define AT91_MB_RX_NUM 11
#define AT91_MB_TX_SHIFT 2
-#define AT91_MB_RX_FIRST 0
+#define AT91_MB_RX_FIRST 1
#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
#define AT91_MB_RX_SPLIT 8
#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+ ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
struct clk *clk;
struct at91_can_data *pdata;
+
+ canid_t mb0_id;
};
static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
set_mb_mode_prio(priv, mb, mode, 0);
}
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+ u32 reg_mid;
+
+ if (can_id & CAN_EFF_FLAG)
+ reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ else
+ reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+ return reg_mid;
+}
+
/*
* Switch transceiver on or off
*/
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
unsigned int i;
+ u32 reg_mid;
/*
- * The first 12 mailboxes are used as a reception FIFO. The
- * last mailbox is configured with overwrite option. The
- * overwrite flag indicates a FIFO overflow.
+ * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+ * mailbox is disabled. The next 11 mailboxes are used as a
+ * reception FIFO. The last mailbox is configured with
+ * overwrite option. The overwrite flag indicates a FIFO
+ * overflow.
*/
+ reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+ for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+ set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+ at91_write(priv, AT91_MID(i), reg_mid);
+ at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
+ }
+
for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
/* Reset tx and rx helper pointers */
- priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+ priv->tx_next = priv->tx_echo = 0;
+ priv->rx_next = AT91_MB_RX_FIRST;
}
static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
-
- if (cf->can_id & CAN_EFF_FLAG)
- reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
- else
- reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+ reg_mid = at91_can_id_to_reg_mid(cf->can_id);
reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
(cf->can_dlc << 16) | AT91_MCR_MTCR;
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* Theory of Operation:
*
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups. The lower group holds 7 and the upper 4 mailboxes.
*
* Like it or not, but the chip always saves a received CAN message
* into the first free mailbox it finds (starting with the
* lowest). This makes it very difficult to read the messages in the
* right order from the chip. This is how we work around that problem:
*
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
* scheduled. We read the mailbox, but do _not_ reenable the mb (to
* receive another message).
*
* lower mbxs upper
- * ______^______ __^__
- * / \ / \
+ * ____^______ __^__
+ * / \ / \
* +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
* +-+-+-+-+-+-+-+-++-+-+-+-+
* 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
* 0 1 2 3 4 5 6 7 8 9 0 1 / box
+ * ^
+ * |
+ * \
+ * unused, due to chip bug
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
"order of incoming frames cannot be guaranteed\n");
again:
- for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
- mb < AT91_MB_RX_NUM && quota > 0;
+ for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+ mb < AT91_MB_RX_LAST + 1 && quota > 0;
reg_sr = at91_read(priv, AT91_SR),
- mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+ mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
at91_read_msg(dev, mb);
/* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
- quota > 0 && mb >= AT91_MB_RX_NUM) {
- priv->rx_next = 0;
+ quota > 0 && mb > AT91_MB_RX_LAST) {
+ priv->rx_next = AT91_MB_RX_FIRST;
goto again;
}
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+ if (priv->mb0_id & CAN_EFF_FLAG)
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+ else
+ return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct at91_priv *priv = netdev_priv(ndev);
+ unsigned long can_id;
+ ssize_t ret;
+ int err;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_UP) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ err = strict_strtoul(buf, 0, &can_id);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
+ if (can_id & CAN_EFF_FLAG)
+ can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ can_id &= CAN_SFF_MASK;
+
+ priv->mb0_id = can_id;
+ ret = count;
+
+ out:
+ rtnl_unlock();
+ return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO,
+ at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+ &dev_attr_mb0_id.attr,
+ NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+ .attrs = at91_sysfs_attrs,
+};
+
static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
dev->netdev_ops = &at91_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
+ dev->sysfs_groups[0] = &at91_sysfs_attr_group;
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
+ priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
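
For reference, a hedged illustration of how at91_can_id_to_reg_mid() from the hunk above encodes identifiers; the default mb0_id of 0x7ff is a standard-frame ID:

	/* illustrative only; relies on at91_can_id_to_reg_mid() shown above */
	static void example_mid_encoding(void)
	{
		/* standard frame: the 11-bit ID lands in MID bits [28:18] */
		u32 std_mid = at91_can_id_to_reg_mid(0x7ff);

		/* extended frame: 29-bit ID kept in place, AT91_MID_MIDE set */
		u32 ext_mid = at91_can_id_to_reg_mid(0x1fffffff | CAN_EFF_FLAG);

		(void)std_mid;
		(void)ext_mid;
	}
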
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..92bd6bdde5e3
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+ tristate "Softing Gmbh CAN generic support"
+ depends on CAN_DEV
+ ---help---
+ Support for CAN cards from Softing GmbH & some cards
+ from Vector GmbH.
+ Softing GmbH CAN cards come with 1 or 2 physical buses.
+ Those cards typically use Dual Port RAM to communicate
+ with the host CPU. The interface is then identical for PCI
+ and PCMCIA cards. This driver operates on a platform device,
+ which has been created by softing_cs or softing_pci driver.
+ Warning:
+ The API of the card does not allow fine control per bus, but
+ controls the 2 buses on the card together.
+ As such, some actions (start/stop/busoff recovery) on 1 bus
+ must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+ tristate "Softing Gmbh CAN pcmcia cards"
+ depends on PCMCIA
+ select CAN_SOFTING
+ ---help---
+ Support for PCMCIA cards from Softing GmbH & some cards
+ from Vector GmbH.
+ You need firmware for these, which you can get at
+ http://developer.berlios.de/projects/socketcan/
+ This version of the driver is written against
+ firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
+ In order to use the card as a CAN device, you need the Softing generic
+ support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+ struct can_priv can; /* must be the first member! */
+ struct net_device *netdev;
+ struct softing *card;
+ struct {
+ int pending;
+ /* variables which hold the circular buffer */
+ int echo_put;
+ int echo_get;
+ } tx;
+ struct can_bittiming_const btr_const;
+ int index;
+ uint8_t output;
+ uint16_t chip;
+};
+#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+ const struct softing_platform_data *pdat;
+ struct platform_device *pdev;
+ struct net_device *net[2];
+ spinlock_t spin; /* protect this structure & DPRAM access */
+ ktime_t ts_ref;
+ ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+ struct {
+ /* indication of firmware status */
+ int up;
+ /* protection of the 'up' variable */
+ struct mutex lock;
+ } fw;
+ struct {
+ int nr;
+ int requested;
+ int svc_count;
+ unsigned int dpram_position;
+ } irq;
+ struct {
+ int pending;
+ int last_bus;
+ /*
+ * keep the bus that last tx'd a message,
+ * in order to let every netdev queue resume
+ */
+ } tx;
+ __iomem uint8_t *dpram;
+ unsigned long dpram_phys;
+ unsigned long dpram_size;
+ struct {
+ uint16_t fw_version, hw_version, license, serial;
+ uint16_t chip[2];
+ unsigned int freq; /* remote cpu's operating frequency */
+ } id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+ const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX 0x0000
+ #define DPRAM_RX_SIZE 32
+ #define DPRAM_RX_CNT 16
+#define DPRAM_RX_RD 0x0201 /* uint8_t */
+#define DPRAM_RX_WR 0x0205 /* uint8_t */
+#define DPRAM_RX_LOST 0x0207 /* uint8_t */
+
+#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
+#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
+#define DPRAM_FCT_HOST 0x032b /* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
+#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
+#define DPRAM_RESET 0x0341 /* uint16_t */
+#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
+#define DPRAM_RESET_TIME 0x034d /* uint16_t */
+#define DPRAM_TIME 0x0350 /* uint64_t */
+#define DPRAM_WR_START 0x0358 /* uint8_t */
+#define DPRAM_WR_END 0x0359 /* uint8_t */
+#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
+#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
+
+#define DPRAM_TX 0x0400 /* uint16_t */
+ #define DPRAM_TX_SIZE 16
+ #define DPRAM_TX_CNT 32
+#define DPRAM_TX_RD 0x0601 /* uint8_t */
+#define DPRAM_TX_WR 0x0605 /* uint8_t */
+
+#define DPRAM_COMMAND 0x07e0 /* uint16_t */
+#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
+#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
+#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
+
+#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
+
+#define TXMAX (DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE 0
+#define RES_OK 1
+#define RES_NOK 2
+#define RES_UNKNOWN 3
+/* DPRAM flags */
+#define CMD_TX 0x01
+#define CMD_ACK 0x02
+#define CMD_XTD 0x04
+#define CMD_RTR 0x08
+#define CMD_ERR 0x10
+#define CMD_BUS2 0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF 0x80
+#define SF_MASK_EPASSIVE 0x60
+
+/* bus states */
+#define STATE_BUSOFF 2
+#define STATE_EPASSIVE 1
+#define STATE_EACTIVE 0
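
The DPRAM offsets above are plain MMIO locations behind card->dpram. The actual command handshake lives in softing_fw.c, which is not part of this file; the helper below is only a hedged sketch of how the layout is meant to be used (locking and completion polling omitted):

	#include <linux/io.h>

	static u16 example_dpram_command(struct softing *card, u16 cmd)
	{
		/* post the command word and kick the card's interrupt line */
		iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
		iowrite8(1, &card->dpram[DPRAM_IRQ_TOCARD]);

		/* the card answers through DPRAM_RECEIPT, e.g. RES_OK;
		 * real code holds card->spin and waits for completion */
		return ioread16(&card->dpram[DPRAM_RECEIPT]);
	}
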
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..300fe75dd1a7
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+ .name = "CANcard",
+ .manf = 0x0168, .prod = 0x001,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-NEC",
+ .manf = 0x0168, .prod = 0x002,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-SJA",
+ .manf = 0x0168, .prod = 0x004,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-2",
+ .manf = 0x0168, .prod = 0x005,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "Vector-CANcard",
+ .manf = 0x0168, .prod = 0x081,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-SJA",
+ .manf = 0x0168, .prod = 0x084,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-2",
+ .manf = 0x0168, .prod = 0x085,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "EDICcard-NEC",
+ .manf = 0x0168, .prod = 0x102,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "EDICcard-2",
+ .manf = 0x0168, .prod = 0x105,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ 0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+ const struct softing_platform_data *lp;
+
+ for (lp = softingcs_platform_data; lp->manf; ++lp) {
+ if ((lp->manf == manf) && (lp->prod == prod))
+ return lp;
+ }
+ return NULL;
+}
+
+/*
+ * platform_data callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+ return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+ return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+ void *priv_data)
+{
+ struct softing_platform_data *pdat = priv_data;
+ struct resource *pres;
+ int memspeed = 0;
+
+ WARN_ON(!pdat);
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (resource_size(pres) < 0x1000)
+ return -ERANGE;
+
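+ /* generation 1 cards use an 8-bit window with wait states, generation 2 a 16-bit window */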
+ pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ if (pdat->generation < 2) {
+ pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+ memspeed = 3;
+ } else {
+ pres->flags |= WIN_DATA_WIDTH_16;
+ }
+ return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+ struct platform_device *pdev = pcmcia->priv;
+
+ /* free bits */
+ platform_device_unregister(pdev);
+ /* release pcmcia stuff */
+ pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+ int ret;
+ struct platform_device *pdev;
+ const struct softing_platform_data *pdat;
+ struct resource *pres;
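+ /* pdev and its resources share one allocation, freed by softingcs_pdev_release() */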
+ struct dev {
+ struct platform_device pdev;
+ struct resource res[2];
+ } *dev;
+
+ /* find matching platform_data */
+ pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+ if (!pdat)
+ return -ENOTTY;
+
+ /* setup pcmcia device */
+ pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+ CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+ ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+ if (ret)
+ goto pcmcia_failed;
+
+ ret = pcmcia_enable_device(pcmcia);
+ if (ret < 0)
+ goto pcmcia_failed;
+
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (!pres) {
+ ret = -EBADF;
+ goto pcmcia_bad;
+ }
+
+ /* create softing platform device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto mem_failed;
+ }
+ dev->pdev.resource = dev->res;
+ dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+ dev->pdev.dev.release = softingcs_pdev_release;
+
+ pdev = &dev->pdev;
+ pdev->dev.platform_data = (void *)pdat;
+ pdev->dev.parent = &pcmcia->dev;
+ pcmcia->priv = pdev;
+
+ /* platform device resources */
+ pdev->resource[0].flags = IORESOURCE_MEM;
+ pdev->resource[0].start = pres->start;
+ pdev->resource[0].end = pres->end;
+
+ pdev->resource[1].flags = IORESOURCE_IRQ;
+ pdev->resource[1].start = pcmcia->irq;
+ pdev->resource[1].end = pdev->resource[1].start;
+
+ /* platform device setup */
+ spin_lock(&softingcs_index_lock);
+ pdev->id = softingcs_index++;
+ spin_unlock(&softingcs_index_lock);
+ pdev->name = "softing";
+ dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+ ret = platform_device_register(pdev);
+ if (ret < 0)
+ goto platform_failed;
+
+ dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+ return 0;
+
+platform_failed:
+ kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+ pcmcia_disable_device(pcmcia);
+ pcmcia->priv = NULL;
+ return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+ /* softing */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+ /* vector, manufacturer? */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+ /* EDIC */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+ .owner = THIS_MODULE,
+ .name = "softingcs",
+ .id_table = softingcs_ids,
+ .probe = softingcs_probe,
+ .remove = __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+ spin_lock_init(&softingcs_index_lock);
+ return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+ pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+ ", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
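+ /*
+ * handshake: write the parameter and the vector, then poll
+ * DPRAM_FCT_HOST until the firmware replaces it with RES_OK
+ * or the timeout expires
+ */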
+ iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+ iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+ iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 1 * HZ;
+ /* wait for card */
+ do {
+ /* DPRAM_FCT_HOST is _not_ aligned */
+ ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+ (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ /* read return-value now */
+ return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+ if ((ret != vector) || time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (1);
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+ int ret;
+
+ ret = _softing_fct_cmd(card, cmd, 0, msg);
+ if (ret > 0) {
+ dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+ ret = -EIO;
+ }
+ return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
+ iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+ iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 3 * HZ;
+ /* wait for card */
+ do {
+ ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ return 0;
+ if (time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (!signal_pending(current));
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+ uint16_t *plen, const uint8_t **pdat)
+{
+ uint16_t checksum[2];
+ const uint8_t *mem;
+ const uint8_t *end;
+
+ /*
+ * firmware records are a binary, unaligned stream composed of:
+ * uint16_t type;
+ * uint32_t addr;
+ * uint16_t len;
+ * uint8_t dat[len];
+ * uint16_t checksum;
+ * all values in little endian.
+ * We could define a struct for this, with __attribute__((packed)),
+ * but would that solve the alignment in _all_ cases (cfr. the
+ * struct itself may be an odd address)?
+ *
+ * I chose to use leXX_to_cpup() since this solves both
+ * endianness & alignment.
+ */
+ mem = *pmem;
+ *ptype = le16_to_cpup((void *)&mem[0]);
+ *paddr = le32_to_cpup((void *)&mem[2]);
+ *plen = le16_to_cpup((void *)&mem[6]);
+ *pdat = &mem[8];
+ /* verify checksum */
+ end = &mem[8 + *plen];
+ checksum[0] = le16_to_cpup((void *)end);
+ for (checksum[1] = 0; mem < end; ++mem)
+ checksum[1] += *mem;
+ if (checksum[0] != checksum[1])
+ return -EINVAL;
+ /* advance to the next record: 8 byte header + len data + 2 byte checksum */
+ *pmem += 10 + *plen;
+ return 0;
+}
+
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *dpram, unsigned int size, int offset)
+{
+ const struct firmware *fw;
+ int ret;
+ const uint8_t *mem, *end, *dat;
+ uint16_t type, len;
+ uint32_t addr;
+ uint8_t *buf = NULL;
+ int buflen = 0;
+ int8_t type_end = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret < 0)
+ return ret;
+ dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+ ", offset %c0x%04x\n",
+ card->pdat->name, file, (unsigned int)fw->size,
+ (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type != 0xffff)
+ goto failed;
+ if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type == 3) {
+ /* start address, not used here */
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if ((addr + len + offset) > size) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ memcpy_toio(&dpram[addr + offset], dat, len);
+ /* be sure to flush caches from IO space */
+ mb();
+ if (len > buflen) {
+ /* align buflen */
+ buflen = (len + (1024-1)) & ~(1024-1);
+ buf = krealloc(buf, buflen, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+ }
+ /* verify record data */
+ memcpy_fromio(buf, &dpram[addr + offset], len);
+ if (memcmp(buf, dat, len)) {
+ /* is not ok */
+ dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end) {
+ /* no end record seen */
+ ret = -EINVAL;
+ goto failed;
+ }
+ ret = 0;
+failed:
+ kfree(buf);
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+ const struct firmware *fw;
+ const uint8_t *mem, *end, *dat;
+ int ret, j;
+ uint16_t type, len;
+ uint32_t addr, start_addr = 0;
+ unsigned int sum, rx_sum;
+ int8_t type_end = 0, type_entrypoint = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret) {
+ dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+ file, ret);
+ return ret;
+ }
+ dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+ file, (unsigned long)fw->size);
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+ ret = -EINVAL;
+ if (type != 0xffff) {
+ dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+ type);
+ goto failed;
+ }
+ if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+ dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+ len, dat);
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+
+ if (type == 3) {
+ /* start address */
+ start_addr = addr;
+ type_entrypoint = 1;
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ dev_alert(&card->pdev->dev,
+ "unknown record type 0x%04x\n", type);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* regular data */
+ for (sum = 0, j = 0; j < len; ++j)
+ sum += dat[j];
+ /* work in 16bit (target) */
+ sum &= 0xffff;
+
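+ /*
+ * hand the record to the bootloader: copy the data into the
+ * app window and describe it via the DPRAM command mailbox
+ */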
+ memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+ iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+ &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+ iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+ ret = softing_bootloader_command(card, 1, "loading app.");
+ if (ret < 0)
+ goto failed;
+ /* verify checksum */
+ rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+ if (rx_sum != sum) {
+ dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+ ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end || !type_entrypoint) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* start application in card */
+ iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+ ret = softing_bootloader_command(card, 3, "start app.");
+ if (ret < 0)
+ goto failed;
+ ret = 0;
+failed:
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+ int ret;
+
+ do {
+ /* reset chip */
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+ iowrite8(1, &card->dpram[DPRAM_RESET]);
+ iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+ ret = softing_fct_cmd(card, 0, "reset_can");
+ if (!ret)
+ break;
+ if (signal_pending(current))
+ /* don't wait any longer */
+ break;
+ } while (1);
+ card->tx.pending = 0;
+ return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+ int ret;
+ /* sync */
+ ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+ if (ret < 0)
+ goto failed;
+
+ ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_reset_chip(card);
+ if (ret < 0)
+ goto failed;
+ /* get_serial */
+ ret = softing_fct_cmd(card, 43, "get_serial_number");
+ if (ret < 0)
+ goto failed;
+ card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+ /* get_version */
+ ret = softing_fct_cmd(card, 12, "get_version");
+ if (ret < 0)
+ goto failed;
+ card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+ card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+ card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+ card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+ card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+ return 0;
+failed:
+ return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+ uint64_t ovf;
+
+ card->ts_ref = ktime_get();
+
+ /* 16MHz is the reference */
+ ovf = 0x100000000ULL * 16;
+ do_div(ovf, card->pdat->freq ?: 16);
+
+ card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+ uint64_t rawl;
+ ktime_t now, real_offset;
+ ktime_t target;
+ ktime_t tmp;
+
+ now = ktime_get();
+ real_offset = ktime_sub(ktime_get_real(), now);
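+ /* the raw card time is relative to the monotonic ts_ref; real_offset maps the result to wall clock time */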
+
+ /* find nsec from card */
+ rawl = raw * 16;
+ do_div(rawl, card->pdat->freq ?: 16);
+ target = ktime_add_us(card->ts_ref, rawl);
+ /* test for overflows */
+ tmp = ktime_add(target, card->ts_overflow);
+ while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
+ card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+ target = tmp;
+ tmp = ktime_add(target, card->ts_overflow);
+ }
+ return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+ int ret;
+ struct softing *card;
+ struct softing_priv *priv;
+ struct net_device *netdev;
+ int bus_bitmask_start;
+ int j, error_reporting;
+ struct can_frame msg;
+ const struct can_bittiming *bt;
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+
+ if (!card->fw.up)
+ return -EIO;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return ret;
+
+ bus_bitmask_start = 0;
+ if (dev && up)
+ /* prepare to start this bus as well */
+ bus_bitmask_start |= (1 << priv->index);
+ /* bring netdevs down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+
+ if (dev != netdev)
+ netif_stop_queue(netdev);
+
+ if (netif_running(netdev)) {
+ if (dev != netdev)
+ bus_bitmask_start |= (1 << j);
+ priv->tx.pending = 0;
+ priv->tx.echo_put = 0;
+ priv->tx.echo_get = 0;
+ /*
+ * this bus may just have called open_candev(),
+ * so calling close_candev() right away looks odd.
+ * But we may also get here from busoff recovery,
+ * in which case the echo_skb queue _needs_ flushing.
+ * Just be sure to call open_candev() again afterwards.
+ */
+ close_candev(netdev);
+ }
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+ card->tx.pending = 0;
+
+ softing_enable_irq(card, 0);
+ ret = softing_reset_chip(card);
+ if (ret)
+ goto failed;
+ if (!bus_bitmask_start)
+ /* no busses to be brought up */
+ goto card_done;
+
+ if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+ && (softing_error_reporting(card->net[0])
+ != softing_error_reporting(card->net[1]))) {
+ dev_alert(&card->pdev->dev,
+ "err_reporting flag differs for busses\n");
+ goto invalid;
+ }
+ error_reporting = 0;
+ if (bus_bitmask_start & 1) {
+ netdev = card->net[0];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip 1 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+ if (ret < 0)
+ goto failed;
+ /* set mode */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 3, "set_mode[0]");
+ if (ret < 0)
+ goto failed;
+ /* set filter */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 7, "set_filter[0]");
+ if (ret < 0)
+ goto failed;
+ /* set output control */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 5, "set_output[0]");
+ if (ret < 0)
+ goto failed;
+ }
+ if (bus_bitmask_start & 2) {
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip2 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+ if (ret < 0)
+ goto failed;
+ /* set mode2 */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 4, "set_mode[1]");
+ if (ret < 0)
+ goto failed;
+ /* set filter2 */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 8, "set_filter[1]");
+ if (ret < 0)
+ goto failed;
+ /* set output control2 */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 6, "set_output[1]");
+ if (ret < 0)
+ goto failed;
+ }
+ /* enable_error_frame */
+ /*
+ * Error reporting is switched off at the moment since
+ * the receiving of them is not yet 100% verified
+ * This should be enabled sooner or later
+ *
+ if (error_reporting) {
+ ret = softing_fct_cmd(card, 51, "enable_error_frame");
+ if (ret < 0)
+ goto failed;
+ }
+ */
+ /* initialize interface */
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+ ret = softing_fct_cmd(card, 17, "initialize_interface");
+ if (ret < 0)
+ goto failed;
+ /* enable_fifo */
+ ret = softing_fct_cmd(card, 36, "enable_fifo");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack */
+ ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack2 */
+ ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+ if (ret < 0)
+ goto failed;
+ /* start_chip */
+ ret = softing_fct_cmd(card, 11, "start_chip");
+ if (ret < 0)
+ goto failed;
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+ if (card->pdat->generation < 2) {
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ /* flush the DPRAM caches */
+ wmb();
+ }
+
+ softing_initialize_timestamp(card);
+
+ /*
+ * do socketcan notifications/status changes.
+ * From here on, no errors should occur, otherwise
+ * the failed: path must be reviewed
+ */
+ memset(&msg, 0, sizeof(msg));
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+ msg.can_dlc = CAN_ERR_DLC;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!(bus_bitmask_start & (1 << j)))
+ continue;
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ open_candev(netdev);
+ if (dev != netdev) {
+ /* notify other busses on the restart */
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ ++priv->can.can_stats.restarts;
+ }
+ netif_wake_queue(netdev);
+ }
+
+ /* enable interrupts */
+ ret = softing_enable_irq(card, 1);
+ if (ret)
+ goto failed;
+card_done:
+ mutex_unlock(&card->fw.lock);
+ return 0;
+invalid:
+ ret = -EINVAL;
+failed:
+ softing_enable_irq(card, 0);
+ softing_reset_chip(card);
+ mutex_unlock(&card->fw.lock);
+ /* bring all other interfaces down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ dev_close(netdev);
+ }
+ return ret;
+}
+
+int softing_default_output(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+ struct softing *card = priv->card;
+
+ switch (priv->chip) {
+ case 1000:
+ return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+ case 5:
+ return 0x60;
+ default:
+ return 0x40;
+ }
+}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..5157e15e96eb
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
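+ /* each of the (up to) 2 busses may keep about half of the card's tx fifo pending */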
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
+
+/*
+ * test if a specific CAN netdev is online
+ * (i.e. up and running, not sleeping, not busoff)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+ struct can_priv *can = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+ return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+/* trigger the tx queueing */
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct softing_priv *priv = netdev_priv(dev);
+ struct softing *card = priv->card;
+ int ret;
+ uint8_t *ptr;
+ uint8_t fifo_wr, fifo_rd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ uint8_t buf[DPRAM_TX_SIZE];
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&card->spin);
+
+ ret = NETDEV_TX_BUSY;
+ if (!card->fw.up ||
+ (card->tx.pending >= TXMAX) ||
+ (priv->tx.pending >= TX_ECHO_SKB_MAX))
+ goto xmit_done;
+ fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+ fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+ if (fifo_wr == fifo_rd)
+ /* fifo full */
+ goto xmit_done;
+ memset(buf, 0, sizeof(buf));
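+ /* compose the DPRAM tx record: command flags, dlc, can_id (LSB first), up to 8 data bytes */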
+ ptr = buf;
+ *ptr = CMD_TX;
+ if (cf->can_id & CAN_RTR_FLAG)
+ *ptr |= CMD_RTR;
+ if (cf->can_id & CAN_EFF_FLAG)
+ *ptr |= CMD_XTD;
+ if (priv->index)
+ *ptr |= CMD_BUS2;
+ ++ptr;
+ *ptr++ = cf->can_dlc;
+ *ptr++ = (cf->can_id >> 0);
+ *ptr++ = (cf->can_id >> 8);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ *ptr++ = (cf->can_id >> 16);
+ *ptr++ = (cf->can_id >> 24);
+ } else {
+ /* increment 1, not 2 as you might think */
+ ptr += 1;
+ }
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ memcpy(ptr, &cf->data[0], cf->can_dlc);
+ memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+ buf, DPRAM_TX_SIZE);
+ if (++fifo_wr >= DPRAM_TX_CNT)
+ fifo_wr = 0;
+ iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+ card->tx.last_bus = priv->index;
+ ++card->tx.pending;
+ ++priv->tx.pending;
+ can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ ++priv->tx.echo_put;
+ if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_put = 0;
+ /* can_put_echo_skb() saves the skb, safe to return TX_OK */
+ ret = NETDEV_TX_OK;
+xmit_done:
+ spin_unlock(&card->spin);
+ if (card->tx.pending >= TXMAX) {
+ int j;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->net[j])
+ netif_stop_queue(card->net[j]);
+ }
+ }
+ if (ret != NETDEV_TX_OK)
+ netif_stop_queue(dev);
+
+ return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+ memcpy(cf, msg, sizeof(*msg));
+ skb->tstamp = ktime;
+ return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue, and process
+ */
+static int softing_handle_1(struct softing *card)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ ktime_t ktime;
+ struct can_frame msg;
+ int cnt = 0, lost_msg;
+ uint8_t fifo_rd, fifo_wr, cmd;
+ uint8_t *ptr;
+ uint32_t tmp_u32;
+ uint8_t buf[DPRAM_RX_SIZE];
+
+ memset(&msg, 0, sizeof(msg));
+ /* test for lost msgs */
+ lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+ if (lost_msg) {
+ int j;
+ /* reset condition */
+ iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+ /* prepare msg */
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ msg.can_dlc = CAN_ERR_DLC;
+ msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ /*
+ * we don't know which bus the overflow applies to,
+ * so service all busses, but only those that are online
+ */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ if (!canif_is_active(netdev))
+ /* a dead bus has no overflows */
+ continue;
+ ++netdev->stats.rx_over_errors;
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ }
+ /* prepare for other use */
+ memset(&msg, 0, sizeof(msg));
+ ++cnt;
+ }
+
+ fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+ fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
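+ /* rd holds the last slot consumed; if the next slot equals wr, the fifo is empty */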
+ if (++fifo_rd >= DPRAM_RX_CNT)
+ fifo_rd = 0;
+ if (fifo_wr == fifo_rd)
+ return cnt;
+
+ memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+ DPRAM_RX_SIZE);
+ mb();
+ /* trigger dual port RAM */
+ iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+ ptr = buf;
+ cmd = *ptr++;
+ if (cmd == 0xff)
+ /* not quite useful; the card has probably dropped out */
+ return 0;
+ netdev = card->net[0];
+ if (cmd & CMD_BUS2)
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+
+ if (cmd & CMD_ERR) {
+ uint8_t can_state, state;
+
+ state = *ptr++;
+
+ msg.can_id = CAN_ERR_FLAG;
+ msg.can_dlc = CAN_ERR_DLC;
+
+ if (state & SF_MASK_BUSOFF) {
+ can_state = CAN_STATE_BUS_OFF;
+ msg.can_id |= CAN_ERR_BUSOFF;
+ state = STATE_BUSOFF;
+ } else if (state & SF_MASK_EPASSIVE) {
+ can_state = CAN_STATE_ERROR_PASSIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+ state = STATE_EPASSIVE;
+ } else {
+ can_state = CAN_STATE_ERROR_ACTIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ state = STATE_EACTIVE;
+ }
+ /* update DPRAM */
+ iowrite8(state, &card->dpram[priv->index ?
+ DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+
+ ++netdev->stats.rx_errors;
+ /* update internal status */
+ if (can_state != priv->can.state) {
+ priv->can.state = can_state;
+ if (can_state == CAN_STATE_ERROR_PASSIVE)
+ ++priv->can.can_stats.error_passive;
+ else if (can_state == CAN_STATE_BUS_OFF) {
+ /* this calls can_close_cleanup() */
+ can_bus_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ /* trigger socketcan */
+ softing_netdev_rx(netdev, &msg, ktime);
+ }
+
+ } else {
+ if (cmd & CMD_RTR)
+ msg.can_id |= CAN_RTR_FLAG;
+ msg.can_dlc = get_can_dlc(*ptr++);
+ if (cmd & CMD_XTD) {
+ msg.can_id |= CAN_EFF_FLAG;
+ msg.can_id |= le32_to_cpup((void *)ptr);
+ ptr += 4;
+ } else {
+ msg.can_id |= le16_to_cpup((void *)ptr);
+ ptr += 2;
+ }
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ memcpy(&msg.data[0], ptr, 8);
+ ptr += 8;
+ /* update socket */
+ if (cmd & CMD_ACK) {
+ /* acknowledge, was tx msg */
+ struct sk_buff *skb;
+ skb = priv->can.echo_skb[priv->tx.echo_get];
+ if (skb)
+ skb->tstamp = ktime;
+ can_get_echo_skb(netdev, priv->tx.echo_get);
+ ++priv->tx.echo_get;
+ if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_get = 0;
+ if (priv->tx.pending)
+ --priv->tx.pending;
+ if (card->tx.pending)
+ --card->tx.pending;
+ ++netdev->stats.tx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.tx_bytes += msg.can_dlc;
+ } else {
+ int ret;
+
+ ret = softing_netdev_rx(netdev, &msg, ktime);
+ if (ret == NET_RX_SUCCESS) {
+ ++netdev->stats.rx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.rx_bytes += msg.can_dlc;
+ } else {
+ ++netdev->stats.rx_dropped;
+ }
+ }
+ }
+ ++cnt;
+ return cnt;
+}
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ int j, offset, work_done;
+
+ work_done = 0;
+ spin_lock_bh(&card->spin);
+ while (softing_handle_1(card) > 0) {
+ ++card->irq.svc_count;
+ ++work_done;
+ }
+ spin_unlock_bh(&card->spin);
+ /* resume tx queues */
+ offset = card->tx.last_bus;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->tx.pending >= TXMAX)
+ break;
+ netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ if (!canif_is_active(netdev))
+ /* it makes no sense to wake dead busses */
+ continue;
+ if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+ continue;
+ ++work_done;
+ netif_wake_queue(netdev);
+ }
+ return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+ return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+ int ret;
+
+ /* check or determine and set bittime */
+ ret = open_candev(ndev);
+ if (!ret)
+ ret = softing_startstop(ndev, 1);
+ return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+ int ret;
+
+ netif_stop_queue(ndev);
+
+ /* softing cycle does close_candev() */
+ ret = softing_startstop(ndev, 0);
+ return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* softing_startstop does close_candev() */
+ ret = softing_startstop(ndev, 1);
+ return ret;
+ case CAN_MODE_STOP:
+ case CAN_MODE_SLEEP:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * Softing device management helpers
+ */
+int softing_enable_irq(struct softing *card, int enable)
+{
+ int ret;
+
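+ /* request or release the threaded interrupt, depending on the enable flag */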
+ if (!card->irq.nr) {
+ return 0;
+ } else if (card->irq.requested && !enable) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ } else if (!card->irq.requested && enable) {
+ ret = request_threaded_irq(card->irq.nr,
+ (card->pdat->generation >= 2) ?
+ softing_irq_v2 : softing_irq_v1,
+ softing_irq_thread, IRQF_SHARED,
+ dev_name(&card->pdev->dev), card);
+ if (ret) {
+ dev_alert(&card->pdev->dev,
+ "request_threaded_irq(%u) failed\n",
+ card->irq.nr);
+ return ret;
+ }
+ card->irq.requested = 1;
+ }
+ return 0;
+}
+
+static void softing_card_shutdown(struct softing *card)
+{
+ int fw_up = 0;
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ /* shutdown must proceed even if interrupted; ignore -ERESTARTSYS */;
+ fw_up = card->fw.up;
+ card->fw.up = 0;
+
+ if (card->irq.requested && card->irq.nr) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ }
+ if (fw_up) {
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ }
+ mutex_unlock(&card->fw.lock);
+}
+
+static __devinit int softing_card_boot(struct softing *card)
+{
+ int ret, j;
+ static const uint8_t stream[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+ unsigned char back[sizeof(stream)];
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ return -ERESTARTSYS;
+ if (card->fw.up) {
+ mutex_unlock(&card->fw.lock);
+ return 0;
+ }
+ /* reset board */
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 1);
+ /* boot card */
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
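+ /* sanity check the DPRAM window: write a test pattern and read it back */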
+ for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+ j += sizeof(stream)) {
+
+ memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+ /* flush IO cache */
+ mb();
+ memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+ if (!memcmp(back, stream, sizeof(stream)))
+ continue;
+ /* memory is not equal */
+ dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+ ret = -EIO;
+ goto failed;
+ }
+ wmb();
+ /* load boot firmware */
+ ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->boot.offs - card->pdat->boot.addr);
+ if (ret < 0)
+ goto failed;
+ /* load loader firmware */
+ ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->load.offs - card->pdat->load.addr);
+ if (ret < 0)
+ goto failed;
+
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 0);
+ softing_clr_reset_dpram(card);
+ ret = softing_bootloader_command(card, 0, "card boot");
+ if (ret < 0)
+ goto failed;
+ ret = softing_load_app_fw(card->pdat->app.fw, card);
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_chip_poweron(card);
+ if (ret < 0)
+ goto failed;
+
+ card->fw.up = 1;
+ mutex_unlock(&card->fw.lock);
+ return 0;
+failed:
+ card->fw.up = 0;
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ mutex_unlock(&card->fw.lock);
+ return ret;
+}
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+ struct softing *card = priv->card;
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ val &= 0xFF;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return -ERESTARTSYS;
+ if (netif_running(ndev)) {
+ mutex_unlock(&card->fw.lock);
+ return -EBUSY;
+ }
+ priv->output = val;
+ mutex_unlock(&card->fw.lock);
+ return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+ &dev_attr_channel.attr,
+ &dev_attr_chip.attr,
+ &dev_attr_output.attr,
+ NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+ .name = NULL,
+ .attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+ .ndo_open = softing_netdev_open,
+ .ndo_stop = softing_netdev_stop,
+ .ndo_start_xmit = softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4, /* overruled */
+ .brp_min = 1,
+ .brp_max = 32, /* overruled */
+ .brp_inc = 1,
+};
+
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+ uint16_t chip_id)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+
+ netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+ if (!netdev) {
+ dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+ return NULL;
+ }
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->card = card;
+ memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+ priv->btr_const.brp_max = card->pdat->max_brp;
+ priv->btr_const.sjw_max = card->pdat->max_sjw;
+ priv->can.bittiming_const = &priv->btr_const;
+ priv->can.clock.freq = 8000000;
+ priv->chip = chip_id;
+ priv->output = softing_default_output(netdev);
+ SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+ netdev->flags |= IFF_ECHO;
+ netdev->netdev_ops = &softing_netdev_ops;
+ priv->can.do_set_mode = softing_candev_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+ int ret;
+
+ netdev->sysfs_groups[0] = &netdev_sysfs_group;
+ ret = register_candev(netdev);
+ if (ret) {
+ dev_alert(&netdev->dev, "register failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+ unregister_candev(netdev);
+ free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_hardware.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_license.attr,
+ &dev_attr_frequency.attr,
+ &dev_attr_txpending.attr,
+ NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+ .name = NULL,
+ .attrs = softing_pdev_attrs,
+};
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+ struct softing *card = platform_get_drvdata(pdev);
+ int j;
+
+ /* first, disable card */
+ softing_card_shutdown(card);
+
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ card->net[j] = NULL;
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+ iounmap(card->dpram);
+ kfree(card);
+ return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+ const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ struct softing *card;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ struct resource *pres;
+ int ret;
+ int j;
+
+ if (!pdat) {
+ dev_warn(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ if (pdat->nbus > ARRAY_SIZE(card->net)) {
+ dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+ return -EINVAL;
+ }
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+ card->pdat = pdat;
+ card->pdev = pdev;
+ platform_set_drvdata(pdev, card);
+ mutex_init(&card->fw.lock);
+ spin_lock_init(&card->spin);
+
+ ret = -EINVAL;
+ pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres)
+ goto platform_resource_failed;
+ card->dpram_phys = pres->start;
+ card->dpram_size = pres->end - pres->start + 1;
+ card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ if (!card->dpram) {
+ dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+ goto ioremap_failed;
+ }
+
+ pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (pres)
+ card->irq.nr = pres->start;
+
+ /* reset card */
+ ret = softing_card_boot(card);
+ if (ret < 0) {
+ dev_alert(&pdev->dev, "failed to boot\n");
+ goto boot_failed;
+ }
+
+ /* only now are the chips known */
+ card->id.freq = card->pdat->freq;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+ if (ret < 0) {
+ dev_alert(&card->pdev->dev, "sysfs failed\n");
+ goto sysfs_failed;
+ }
+
+ ret = -ENOMEM;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ card->net[j] = netdev =
+ softing_netdev_create(card, card->id.chip[j]);
+ if (!netdev) {
+ dev_alert(&pdev->dev, "failed to make can[%i]", j);
+ goto netdev_failed;
+ }
+ priv = netdev_priv(card->net[j]);
+ priv->index = j;
+ ret = softing_netdev_register(netdev);
+ if (ret) {
+ free_candev(netdev);
+ card->net[j] = NULL;
+ dev_alert(&card->pdev->dev,
+ "failed to register can[%i]\n", j);
+ goto netdev_failed;
+ }
+ }
+ dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+ return 0;
+
+netdev_failed:
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+ softing_card_shutdown(card);
+boot_failed:
+ iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+ kfree(card);
+ return ret;
+}
+
+static struct platform_driver softing_driver = {
+ .driver = {
+ .name = "softing",
+ .owner = THIS_MODULE,
+ },
+ .probe = softing_pdev_probe,
+ .remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+ return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+ platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+ unsigned int manf;
+ unsigned int prod;
+ /*
+ * generation
+ * 1st with NEC or SJA1000
+ * 8bit, exclusive interrupt, ...
+ * 2nd only SJA1000
+ * 16bit, shared interrupt
+ */
+ int generation;
+ int nbus; /* # busses on device */
+ unsigned int freq; /* operating frequency in Hz */
+ unsigned int max_brp;
+ unsigned int max_sjw;
+ unsigned long dpram_size;
+ const char *name;
+ struct {
+ unsigned long offs;
+ unsigned long addr;
+ const char *fw;
+ } boot, load, app;
+ /*
+ * reset() function
+ * bring pdev in or out of reset, depending on value
+ */
+ int (*reset)(struct platform_device *pdev, int value);
+ int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566f..2d2d28f58e91 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+ return rcu_dereference_protected(cnic_ulp_tbl[type],
+ lockdep_is_held(&cnic_lock));
+}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type]) {
+ if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type] == NULL) {
+ if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
@@ -699,13 +706,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in big endian format. */
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
- *page_table = (u32) dma->pg_map_arr[i];
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
@@ -713,13 +720,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in little endian format. */
- *page_table = dma->pg_map_arr[i] & 0xffffffff;
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
@@ -2953,7 +2960,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
@@ -2977,7 +2985,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
@@ -3041,7 +3050,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
@@ -3065,7 +3074,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
@@ -4170,6 +4179,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+ u32 max_conn;
+
+ max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+ dev->max_iscsi_conn = max_conn;
+}
+
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4511,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ cnic_get_bnx2_iscsi_info(dev);
+
return 0;
}
@@ -4705,129 +4724,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
cp->rx_cons = *cp->rx_cons_ptr;
}
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
- u32 lower_addr)
-{
- u32 val;
- u8 mac[6];
-
- val = CNIC_RD(dev, upper_addr);
-
- mac[0] = (u8) (val >> 8);
- mac[1] = (u8) val;
-
- val = CNIC_RD(dev, lower_addr);
-
- mac[2] = (u8) (val >> 24);
- mac[3] = (u8) (val >> 16);
- mac[4] = (u8) (val >> 8);
- mac[5] = (u8) val;
-
- if (is_valid_ether_addr(mac)) {
- memcpy(dev->mac_addr, mac, 6);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
- struct cnic_local *cp = dev->cnic_priv;
- u32 base, base2, addr, addr1, val;
- int port = CNIC_PORT(cp);
-
- dev->max_iscsi_conn = 0;
- base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
- if (base == 0)
- return;
-
- base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
- MISC_REG_GENERIC_CR_0));
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_upper);
-
- addr1 = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_lower);
-
- cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
- addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
- val = CNIC_RD(dev, addr);
-
- if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
- u16 val16;
-
- addr = BNX2X_SHMEM_ADDR(base,
- drv_lic_key[port].max_iscsi_init_conn);
- val16 = CNIC_RD16(dev, addr);
-
- if (val16)
- val16 ^= 0x1e1e;
- dev->max_iscsi_conn = val16;
- }
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id))
- dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
- if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
- int func = CNIC_FUNC(cp);
- u32 mf_cfg_addr;
-
- if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
- mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
- mf_cfg_addr));
- else
- mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- /* Must determine if the MF is SD vs SI mode */
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.shared_feature_config.config);
- val = CNIC_RD(dev, addr);
- if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
- SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
- int rc;
-
- /* MULTI_FUNCTION_SI mode */
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].func_cfg);
- val = CNIC_RD(dev, addr);
- if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
- dev->max_iscsi_conn = 0;
-
- if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
- dev->max_fcoe_conn = 0;
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_upper);
- addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_lower);
- rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
- addr1);
- if (rc && func > 1)
- dev->max_iscsi_conn = 0;
-
- return;
- }
- }
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_mf_config[func].e1hov_tag);
-
- val = CNIC_RD(dev, addr);
- val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- dev->max_fcoe_conn = 0;
- dev->max_iscsi_conn = 0;
- }
- }
- if (!is_valid_ether_addr(dev->mac_addr))
- dev->max_iscsi_conn = 0;
-}
-
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4805,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cnic_init_bnx2x_kcq(dev);
- cnic_get_bnx2x_iscsi_info(dev);
-
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5343,6 +5237,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+ !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c3..4456260c653c 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
- struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+ struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe003..e01b49ee3591 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.12"
-#define CNIC_MODULE_RELDATE "Jan 03, 2011"
+#define CNIC_MODULE_VERSION "2.2.13"
+#define CNIC_MODULE_RELDATE "Jan 31, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
+#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
u32 fcoe_init_cid;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
+ u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3f..ec35d458102c 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2710,6 +2710,8 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ netif_carrier_off(dev);
+
if (!(adapter->flags & FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
@@ -3661,7 +3663,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
pi->port_id = i;
- netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e1369053..00bf595ebd67 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
+ spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cfb..daa7fe4b9fdd 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
};
#define E1000_STAT(str, m) { \
- .stat_string = str, \
- .type = E1000_STATS, \
- .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
- .stat_offset = offsetof(struct e1000_adapter, m) }
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
#define E1000_NETDEV_STAT(str, m) { \
- .stat_string = str, \
- .type = NETDEV_STATS, \
- .sizeof_stat = sizeof(((struct net_device *)0)->m), \
- .stat_offset = offsetof(struct net_device, m) }
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("tx_broadcast", stats.bptc),
E1000_STAT("rx_multicast", stats.mprc),
E1000_STAT("tx_multicast", stats.mptc),
- E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
- E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
- E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
E1000_STAT("multicast", stats.mprc),
E1000_STAT("collisions", stats.colc),
- E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
- E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
E1000_STAT("rx_crc_errors", stats.crcerrs),
- E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
E1000_STAT("rx_no_buffer_count", stats.rnbc),
E1000_STAT("rx_missed_errors", stats.mpc),
E1000_STAT("tx_aborted_errors", stats.ecol),
E1000_STAT("tx_carrier_errors", stats.tncrs),
- E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
- E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
E1000_STAT("tx_window_errors", stats.latecol),
E1000_STAT("tx_abort_late_coll", stats.latecol),
E1000_STAT("tx_deferred_ok", stats.dc),
@@ -684,20 +684,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
- /*
- * use a memcpy to save any previously configured
- * items like napi structs from having to be
- * reinitialized
- */
- memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
- rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
- memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
@@ -1255,7 +1248,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u32 stat_reg = 0;
u16 phy_reg = 0;
s32 ret_val = 0;
@@ -1363,8 +1355,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
- stat_reg = er32(STATUS);
- if ((stat_reg & E1000_STATUS_FD) == 0)
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@@ -1982,14 +1973,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
int i;
char *p = NULL;
- e1000e_update_stats(adapter);
+ e1000e_get_stats64(netdev, &net_stats);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) netdev +
+ p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec1..232b42b7f7ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 ret_val = -E1000_ERR_NVM;
- s32 i = 0;
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
+ s32 i = 0;
+
/*
* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf66..96921de5df2e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
- u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
udelay(1);
- timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26b0812..5b916b01805f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1057,8 +1055,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
- netdev->stats.tx_bytes += total_tx_bytes;
- netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
}
@@ -1245,8 +1241,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1426,8 +1420,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -2728,7 +2720,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
- u32 psrctl = 0;
u32 pages = 0;
/* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2827,6 +2818,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
/* Configure extra packet-split registers */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@@ -3028,7 +3021,6 @@ static void e1000_set_multi(struct net_device *netdev)
struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
- int i;
/* Check for Promiscuous and All Multicast modes */
@@ -3051,12 +3043,13 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
@@ -3338,6 +3331,8 @@ int e1000e_up(struct e1000_adapter *adapter)
return 0;
}
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3372,6 +3367,11 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3413,6 +3413,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->stats64_lock);
+
e1000e_set_interrupt_capability(adapter);
if (e1000_alloc_queues(adapter))
@@ -3886,7 +3888,7 @@ release:
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -3998,10 +4000,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- int ret_val;
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4147,7 +4150,6 @@ static void e1000_watchdog_task(struct work_struct *work)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
- int tx_pending = 0;
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
@@ -4285,7 +4287,9 @@ static void e1000_watchdog_task(struct work_struct *work)
}
link_up:
+ spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@@ -4299,21 +4303,18 @@ link_up:
e1000e_update_adaptive(&adapter->hw);
- if (!netif_carrier_ok(netdev)) {
- tx_pending = (e1000_desc_unused(tx_ring) + 1 <
- tx_ring->count);
- if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
/* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4384,13 +4385,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (!skb_is_gso(skb))
return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
if (err)
return err;
}
@@ -4897,16 +4898,55 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
- /* only return the current stats */
- return &netdev->stats;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
}
/**
@@ -5476,9 +5516,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
- int vector, msix_irq;
if (adapter->msix_entries) {
+ int vector, msix_irq;
+
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
disable_irq(msix_irq);
@@ -5675,7 +5716,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
+ .ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_multicast_list = e1000_set_multi,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134b..6ae31fcfb629 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db7..ca3be4f15556 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,8 +32,8 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.10"
-#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.2"
+#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -49,7 +49,7 @@ struct enic_msix_entry {
void *devid;
};
-#define ENIC_SET_APPLIED (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb3..89664c670972 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1318,18 +1318,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
vic_provinfo_free(vp);
if (err)
return err;
-
- enic->pp.set |= ENIC_SET_APPLIED;
break;
case PORT_REQUEST_DISASSOCIATE:
- enic->pp.set &= ~ENIC_SET_APPLIED;
break;
default:
return -EINVAL;
}
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to fw
+ */
+ enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
return 0;
}
@@ -1411,7 +1413,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
- if (!(enic->pp.set & ENIC_SET_APPLIED))
+ if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
err = enic_dev_init_done(enic, &done, &error);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 119aa2000c24..5ed8f9f9419f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1920,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ goto irq_fail;
}
}
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb5..8931168d3e74 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
+ struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
- p = rcu_dereference(bpq_devices.next);
+ p = rcu_dereference(list_next_rcu(&bpq_devices));
else
- p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+ p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc6..c1552b6f4a68 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cdc..281324e85980 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513d..200cc3209672 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
ret = sh_irda_set_baudrate(self, speed);
if (ret < 0)
- return ret;
+ goto sh_irda_hard_xmit_end;
self->tx_buff.len = 0;
if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
sh_irda_write(self, IRTFLR, self->tx_buff.len);
sh_irda_write(self, IRTCTR, ARMOD | TE);
- }
+ } else
+ goto sh_irda_hard_xmit_end;
dev_kfree_skb(skb);
return 0;
+
+sh_irda_hard_xmit_end:
+ sh_irda_set_baudrate(self, 9600);
+ netif_wake_queue(self->ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ dev_kfree_skb(skb);
+
+ return ret;
+
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3ff..2300e4599520 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
struct socket sock;
struct socket_wq wq;
int vnet_hdr_sz;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
struct file *file;
unsigned int flags;
};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
struct macvlan_dev *vlan;
spin_lock(&macvtap_lock);
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_protected(q->vlan,
+ lockdep_is_held(&macvtap_lock));
if (vlan) {
int index = get_slot(vlan, q);
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
/* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
- q = rcu_dereference(vlan->taps[i]);
+ q = rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
}
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
else
@@ -583,7 +585,7 @@ err_kfree:
err:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
@@ -631,7 +633,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_count_rx(vlan, len, ret == 0, 0);
rcu_read_unlock_bh();
@@ -727,7 +729,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
dev_hold(vlan->dev);
rcu_read_unlock_bh();
@@ -736,7 +738,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
ret = 0;
- if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
dev_put(vlan->dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a04..a7f2eed9a08a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
- unsigned long features;
+ u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+ u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
if (tso_enabled)
netdev->features |= flags;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
}
ndev = alloc_etherdev(sizeof(struct ns83820));
- dev = PRIV(ndev);
-
err = -ENOMEM;
- if (!dev)
+ if (!ndev)
goto out;
+ dev = PRIV(ndev);
dev->ndev = ndev;
spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a738..1bf12339441b 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -2247,7 +2247,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
pch_gbe_hal_phy_hw_reset(&adapter->hw);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c4466978..9f6d670748d1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
- atomic_long_read(&file->f_count));
+ pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
mutex_unlock(&ppp_mutex);
return err;
}
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (pf->kind != INTERFACE) {
/* can't happen */
- printk(KERN_ERR "PPP: not interface or channel??\n");
+ pr_err("PPP: not interface or channel??\n");
return -EINVAL;
}
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
vj = slhc_init(val2+1, val+1);
if (!vj) {
- printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+ netdev_err(ppp->dev,
+ "PPP: no memory (VJ compressor)\n");
err = -ENOMEM;
break;
}
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
{
int err;
- printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+ pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops);
if (err) {
- printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+ pr_err("failed to register PPP pernet device (%d)\n", err);
goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (err) {
- printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+ pr_err("failed to register PPP device (%d)\n", err);
goto out_net;
}
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
- printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
return NULL;
}
if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
* the same number.
*/
if (net_ratelimit())
- printk(KERN_ERR "ppp: compressor dropped pkt\n");
+ netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
kfree_skb(skb);
kfree_skb(new_skb);
new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: outbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
if (!new_skb) {
- printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
goto drop;
}
skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
proto != PPP_LCP && proto != PPP_CCP) {
if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
if (net_ratelimit())
- printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+ netdev_err(ppp->dev,
+ "ppp: compression required but "
+ "down - pkt dropped.\n");
goto drop;
}
skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128);
if (!ns) {
- printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+ netdev_err(ppp->dev, "PPP: no memory "
+ "(VJ decomp)\n");
goto err;
}
skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
- printk(KERN_DEBUG "PPP: VJ decompression error\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: VJ decompression error\n");
goto err;
}
len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
- printk(KERN_ERR "PPP: VJ uncompressed error\n");
+ netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
goto err;
}
proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: inbound frame "
- "not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: inbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
ns = dev_alloc_skb(obuff_size);
if (!ns) {
- printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+ netdev_err(ppp->dev, "ppp_decompress_frame: "
+ "no memory\n");
goto err;
}
/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
u32 seq = ppp->nextseq;
u32 minseq = ppp->minseq;
struct sk_buff_head *list = &ppp->mrq;
- struct sk_buff *p, *next;
+ struct sk_buff *p, *tmp;
struct sk_buff *head, *tail;
struct sk_buff *skb = NULL;
int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
return NULL;
head = list->next;
tail = NULL;
- for (p = head; p != (struct sk_buff *) list; p = next) {
- next = p->next;
+ skb_queue_walk_safe(list, p, tmp) {
+ again:
if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
- printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
- PPP_MP_CB(p)->sequence, seq);
- head = next;
+ netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+ "seq %u < %u\n",
+ PPP_MP_CB(p)->sequence, seq);
+ __skb_unlink(p, list);
+ kfree_skb(p);
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
lost = 1;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
minseq + 1: PPP_MP_CB(p)->sequence;
- next = p;
- continue;
+ goto again;
}
/*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
(PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
- printk(KERN_DEBUG "PPP: reconstructed packet"
- " is too long (%d)\n", len);
- } else if (p == head) {
- /* fragment is complete packet - reuse skb */
- tail = p;
- skb = skb_get(p);
- break;
- } else if ((skb = dev_alloc_skb(len)) == NULL) {
- ++ppp->dev->stats.rx_missed_errors;
- printk(KERN_DEBUG "PPP: no memory for "
- "reconstructed packet");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: reconstructed packet"
+ " is too long (%d)\n", len);
} else {
tail = p;
break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
- if (PPP_MP_CB(p)->BEbits & E)
- head = next;
+ if (PPP_MP_CB(p)->BEbits & E) {
+ struct sk_buff *tmp2;
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+ head = skb_peek(list);
+ if (!head)
+ break;
+ }
++seq;
}
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
if (ppp->debug & 1)
- printk(KERN_DEBUG " missed pkts %u..%u\n",
- ppp->nextseq,
- PPP_MP_CB(head)->sequence-1);
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+ ppp->nextseq,
+ PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
- if (head != tail)
- /* copy to a single skb */
- for (p = head; p != tail->next; p = p->next)
- skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
- ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
- head = tail->next;
- }
+ skb = head;
+ if (head != tail) {
+ struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+ p = skb_queue_next(list, head);
+ __skb_unlink(skb, list);
+ skb_queue_walk_from_safe(list, p, tmp) {
+ __skb_unlink(p, list);
+ *fragpp = p;
+ p->next = NULL;
+ fragpp = &p->next;
+
+ skb->len += p->len;
+ skb->data_len += p->len;
+ skb->truesize += p->len;
+
+ if (p == tail)
+ break;
+ }
+ } else {
+ __skb_unlink(skb, list);
+ }
- /* Discard all the skbuffs that we have copied the data out of
- or that we can't use. */
- while ((p = list->next) != head) {
- __skb_unlink(p, list);
- kfree_skb(p);
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
}
return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ret = register_netdev(dev);
if (ret != 0) {
unit_put(&pn->units_idr, unit);
- printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
+ netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
goto out2;
}
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
if (!ppp->file.dead || ppp->n_channels) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
- "n_channels=%d !\n", ppp, ppp->file.dead,
- ppp->n_channels);
+ netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+ "but dead=%d n_channels=%d !\n",
+ ppp, ppp->file.dead, ppp->n_channels);
return;
}
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
if (!pch->file.dead) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying undead channel %p !\n",
- pch);
+ pr_err("ppp: destroying undead channel %p !\n", pch);
return;
}
skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
{
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
- printk(KERN_ERR "PPP: removing module but units remain!\n");
+ pr_err("PPP: removing module but units remain!\n");
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
again:
if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
+ pr_err("PPP: No free memory for idr\n");
return -ENOMEM;
}
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..713969accdbd 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -502,7 +502,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
- unsigned long features;
+ u32 features;
features = NETIF_F_TSO;
if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +519,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+ u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
if (enable)
net_dev->features |= features;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256a..c65270241d2d 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -906,7 +906,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- unsigned long offload_features;
+ u32 offload_features;
u32 reset_world_flags;
};
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee17..43654a3bb0ec 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+ .of_match_table = smc91x_match,
+#endif
},
};
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f83937..c1a344829b54 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (txmac_stat & MAC_TXSTAT_URUN) {
netdev_err(dev, "TX MAC xmit underrun\n");
- gp->net_stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
netdev_err(dev, "TX MAC max packet size error\n");
- gp->net_stats.tx_errors++;
+ dev->stats.tx_errors++;
}
/* The rest are all cases of one of the 16-bit TX
* counters expiring.
*/
if (txmac_stat & MAC_TXSTAT_NCE)
- gp->net_stats.collisions += 0x10000;
+ dev->stats.collisions += 0x10000;
if (txmac_stat & MAC_TXSTAT_ECE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
if (txmac_stat & MAC_TXSTAT_LCE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
/* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
u32 smac = readl(gp->regs + MAC_SMACHINE);
netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
- gp->net_stats.rx_over_errors++;
- gp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
ret = gem_rxmac_reset(gp);
}
if (rxmac_stat & MAC_RXSTAT_ACE)
- gp->net_stats.rx_frame_errors += 0x10000;
+ dev->stats.rx_frame_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_CCE)
- gp->net_stats.rx_crc_errors += 0x10000;
+ dev->stats.rx_crc_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_LCE)
- gp->net_stats.rx_length_errors += 0x10000;
+ dev->stats.rx_length_errors += 0x10000;
/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
* events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: no buffer for rx frame\n",
gp->dev->name);
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
gp->dev->name);
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
goto do_reset;
}
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
break;
}
gp->tx_skbs[entry] = NULL;
- gp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
entry = NEXT_TX(entry);
}
- gp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
dev_kfree_skb_irq(skb);
}
gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
static int gem_rx(struct gem *gp, int work_to_do)
{
+ struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
__sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
len = (status & RXDCTRL_BUFSZ) >> 16;
if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- gp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & RXDCTRL_BAD)
- gp->net_stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
/* We'll just return it to GEM. */
drop_it:
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto next;
}
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
netif_receive_skb(skb);
- gp->net_stats.rx_packets++;
- gp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
* so we shield against this
*/
if (gp->running) {
- stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
writel(0, gp->regs + MAC_FCSERR);
- stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
writel(0, gp->regs + MAC_AERR);
- stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
writel(0, gp->regs + MAC_LERR);
- stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
- stats->collisions +=
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
(readl(gp->regs + MAC_ECOLL) +
readl(gp->regs + MAC_LCOLL));
writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- return &gp->net_stats;
+ return &dev->stats;
}
static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def6..ede017872367 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@ struct gem {
u32 status;
struct napi_struct napi;
- struct net_device_stats net_stats;
int tx_fifo_sz;
int rx_fifo_sz;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f69998..cc069528b322 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -60,20 +60,14 @@
#define BAR_0 0
#define BAR_2 2
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 116
+#define TG3_MIN_NUM 117
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 3, 2010"
+#define DRV_MODULE_RELDATE "January 25, 2011"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -134,9 +128,6 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define TG3_RX_DMA_ALIGN 16
-#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -1785,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tg3_phy_cl45_read(tp, MDIO_MMD_AN,
TG3_CL45_D7_EEERES_STAT, &val);
- if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
- val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ switch (val) {
+ case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_5719:
+ case ASIC_REV_57765:
+ /* Enable SM_DSP clock and tx 6dB coding. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+ /* Turn off SM_DSP clock. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ }
+ /* Fallthrough */
+ case TG3_CL45_D7_EEERES_STAT_LP_100TX:
tp->setlpicnt = 2;
+ }
}
if (!tp->setlpicnt) {
@@ -2977,11 +2988,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
- !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
- tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
- val | MII_TG3_DSP_CH34TP2_HIBW01);
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ /* Fall through */
+ case ASIC_REV_5719:
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ }
val = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -4722,8 +4741,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
- bool hw_vlan __maybe_unused = false;
- u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4799,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ copy_skb = netdev_alloc_skb(tp->dev, len +
TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4831,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
}
if (desc->type_flags & RXD_FLAG_VLAN &&
- !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
- vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
- if (tp->vlgrp)
- hw_vlan = true;
- else
-#endif
- {
- struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
- __skb_push(skb, VLAN_HLEN);
-
- memmove(ve, skb->data + VLAN_HLEN,
- ETH_ALEN * 2);
- ve->h_vlan_proto = htons(ETH_P_8021Q);
- ve->h_vlan_TCI = htons(vtag);
- }
- }
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb,
+ desc->err_vlan & RXD_VLAN_MASK);
-#if TG3_VLAN_TAG_USED
- if (hw_vlan)
- vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
- else
-#endif
- napi_gro_receive(&tnapi->napi, skb);
+ napi_gro_receive(&tnapi->napi, skb);
received++;
budget--;
@@ -5740,11 +5738,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
base_flags |= TXD_FLAG_TCPUDP_CSUM;
}
-#if TG3_VLAN_TAG_USED
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
len = skb_headlen(skb);
@@ -5986,11 +5982,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
}
}
-#if TG3_VLAN_TAG_USED
+
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -7834,7 +7829,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_DBTMR1_LNKIDLE_2047US);
tw32_f(TG3_CPMU_EEE_DBTMR2,
- TG3_CPMU_DBTMR1_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
@@ -8108,8 +8103,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
/* Setup replenish threshold. */
tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
@@ -8227,8 +8223,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
- val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
tw32(TG3_RDMA_RSRVCTRL_REG,
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8350,7 +8350,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
tw32(MSGINT_MODE, val);
@@ -9090,7 +9091,8 @@ static void tg3_ints_init(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
u32 msi_mode = tr32(MSGINT_MODE);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
@@ -9532,17 +9534,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
RX_MODE_KEEP_VLAN_TAG);
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
* flag clear.
*/
-#if TG3_VLAN_TAG_USED
- if (!tp->vlgrp &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
- rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
- /* By definition, VLAN is disabled always in this
- * case.
- */
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
@@ -10873,13 +10868,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
* Normal traffic and PHY loopback are not affected by
- * errata.
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
return 0;
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ mac_mode |= MAC_MODE_PORT_INT_LPBACK;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10901,7 +10899,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
tg3_writephy(tp, MII_TG3_FET_PTEST,
MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10929,6 +10928,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
} else {
return -EINVAL;
}
@@ -11035,14 +11041,19 @@ out:
static int tg3_test_loopback(struct tg3 *tp)
{
int err = 0;
- u32 cpmuctrl = 0;
+ u32 eee_cap, cpmuctrl = 0;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
err = tg3_reset_hw(tp, 1);
- if (err)
- return TG3_LOOPBACK_FAILED;
+ if (err) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off gphy autopowerdown. */
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11062,8 +11073,10 @@ static int tg3_test_loopback(struct tg3 *tp)
udelay(10);
}
- if (status != CPMU_MUTEX_GNT_DRIVER)
- return TG3_LOOPBACK_FAILED;
+ if (status != CPMU_MUTEX_GNT_DRIVER) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off link-based power management. */
cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11092,6 +11105,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
tg3_phy_toggle_apd(tp, true);
+done:
+ tp->phy_flags |= eee_cap;
+
return err;
}
@@ -11198,7 +11214,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11214,7 +11232,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11230,31 +11250,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!netif_running(dev)) {
- tp->vlgrp = grp;
- return;
- }
-
- tg3_netif_stop(tp);
-
- tg3_full_lock(tp, 0);
-
- tp->vlgrp = grp;
-
- /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
- __tg3_set_rx_mode(dev);
-
- tg3_netif_start(tp);
-
- tg3_full_unlock(tp);
-}
-#endif
-
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct tg3 *tp = netdev_priv(dev);
@@ -13066,9 +13061,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
{
-#if TG3_VLAN_TAG_USED
dev->vlan_features |= flags;
-#endif
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13325,7 +13318,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Determine TSO capabilities */
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ ; /* Do nothing. HW bug. */
+ else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13376,7 +13371,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13394,42 +13390,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- u16 word;
-
- pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKSTA,
- &word);
- switch (word & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 2:
- tp->pcie_readrq = 2048;
- break;
- case 4:
- tp->pcie_readrq = 1024;
- break;
- }
- break;
-
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 1:
- tp->pcie_readrq = 2048;
- break;
- case 2:
- tp->pcie_readrq = 1024;
- break;
- case 4:
- tp->pcie_readrq = 512;
- break;
- }
- }
- }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tp->pcie_readrq = 2048;
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
@@ -13861,11 +13823,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_offset = NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
- tp->rx_offset -= NET_IP_ALIGN;
+ tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -14629,9 +14591,6 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14648,9 +14607,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14700,9 +14656,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
-#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
tp = netdev_priv(dev);
tp->pdev = pdev;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c82..73884b69b749 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -141,6 +141,7 @@
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
#define CHIPREV_ID_57765_A0 0x57785000
+#define CHIPREV_ID_5719_A0 0x05719000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
#define TG3_CPMU_EEE_DBTMR2 0x000036b8
-#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
#define MII_TG3_DSP_TAP1 0x0001
#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_CH34TP2 0x4022
#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
@@ -2808,9 +2817,6 @@ struct tg3 {
u32 rx_std_max_post;
u32 rx_offset;
u32 rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
/* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ecc..0678e7e71f19 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -63,45 +63,45 @@
* - Other minor stuff
*
* v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
- * network cleanup in 2.3.43pre7 (Tigran & myself)
- * - Minor stuff.
+ * network cleanup in 2.3.43pre7 (Tigran & myself)
+ * - Minor stuff.
*
- * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
- * if no cable/link were present.
+ * v1.5 March 22, 2000 - Fixed another timer bug that would hang the
+ * driver if no cable/link were present.
* - Cosmetic changes.
* - TODO: Port completely to new PCI/DMA API
- * Auto-Neg fallback.
- *
- * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
- * tested it though, as the kernel support is currently
- * broken (2.3.99p4p3).
- * - Updated tlan.txt accordingly.
- * - Adjusted minimum/maximum frame length.
- * - There is now a TLAN website up at
- * http://hp.sourceforge.net/
- *
- * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
- * reports PHY information when used with Donald
- * Beckers userspace MII diagnostics utility.
- *
- * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
- * - Added link information to Auto-Neg and forced
- * modes. When NIC operates with auto-neg the driver
- * will report Link speed & duplex modes as well as
- * link partner abilities. When forced link is used,
- * the driver will report status of the established
- * link.
- * Please read tlan.txt for additional information.
- * - Removed call to check_region(), and used
- * return value of request_region() instead.
+ * Auto-Neg fallback.
+ *
+ * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters.
+ * Haven't tested it though, as the kernel support
+ * is currently broken (2.3.99p4p3).
+ * - Updated tlan.txt accordingly.
+ * - Adjusted minimum/maximum frame length.
+ * - There is now a TLAN website up at
+ * http://hp.sourceforge.net/
+ *
+ * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
+ * reports PHY information when used with Donald
+ * Beckers userspace MII diagnostics utility.
+ *
+ * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
+ * - Added link information to Auto-Neg and forced
+ * modes. When NIC operates with auto-neg the driver
+ * will report Link speed & duplex modes as well as
+ * link partner abilities. When forced link is used,
+ * the driver will report status of the established
+ * link.
+ * Please read tlan.txt for additional information.
+ * - Removed call to check_region(), and used
+ * return value of request_region() instead.
*
* v1.8a May 28, 2000 - Minor updates.
*
* v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
- * - Updated with timer fixes from Andrew Morton.
- * - Fixed module race in TLan_Open.
- * - Added routine to monitor PHY status.
- * - Added activity led support for Proliant devices.
+ * - Updated with timer fixes from Andrew Morton.
+ * - Fixed module race in TLan_Open.
+ * - Added routine to monitor PHY status.
+ * - Added activity led support for Proliant devices.
*
* v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
* like the Compaq NetFlex3/E.
@@ -111,8 +111,8 @@
* hardware probe is done with kernel API and
* TLan_EisaProbe.
* - Adjusted debug information for probing.
- * - Fixed bug that would cause general debug information
- * to be printed after driver removal.
+ * - Fixed bug that would cause general debug
+ * information to be printed after driver removal.
* - Added transmit timeout handling.
* - Fixed OOM return values in tlan_probe.
* - Fixed possible mem leak in tlan_exit
@@ -136,8 +136,8 @@
*
* v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
*
- * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
- * when link can't be established.
+ * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
+ * when link can't be established.
* - Added the bbuf option as a kernel parameter.
* - Fixed ioaddr probe bug.
* - Fixed stupid deadlock with MII interrupts.
@@ -147,28 +147,30 @@
* TLAN v1.0 silicon. This needs to be investigated
* further.
*
- * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
- * interrupt. Thanks goes to
- * Adam Keys <adam@ti.com>
- * Denis Beaudoin <dbeaudoin@ti.com>
- * for providing the patch.
- * - Fixed auto-neg output when using multiple
- * adapters.
- * - Converted to use new taskq interface.
+ * v1.14 Dec 16, 2000	- Added support for servicing multiple frames per
+ * interrupt. Thanks goes to
+ * Adam Keys <adam@ti.com>
+ * Denis Beaudoin <dbeaudoin@ti.com>
+ * for providing the patch.
+ * - Fixed auto-neg output when using multiple
+ * adapters.
+ * - Converted to use new taskq interface.
*
- * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
+ * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
*
* Samuel Chessman <chessman@tux.org> New Maintainer!
*
* v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
- * 10T half duplex no loopback
- * Thanks to Gunnar Eikman
+ * 10T half duplex no loopback
+ * Thanks to Gunnar Eikman
*
* Sakari Ailus <sakari.ailus@iki.fi>:
*
* v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
+ * v1.16 Jan 6 2011 - Make checkpatch.pl happy.
+ * v1.17 Jan 6 2011 - Add suspend/resume support.
*
- *******************************************************************************/
+ ******************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
@@ -185,13 +187,11 @@
#include "tlan.h"
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
/* For removing EISA devices */
-static struct net_device *TLan_Eisa_Devices;
+static struct net_device *tlan_eisa_devices;
-static int TLanDevicesInstalled;
+static int tlan_devices_installed;
/* Set speed, duplex and aui settings */
static int aui[MAX_TLAN_BOARDS];
@@ -202,7 +202,8 @@ module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
@@ -218,139 +219,144 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
-static const char TLanSignature[] = "TLAN";
-static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;
-static const char *media[] = {
- "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
};
static struct board {
- const char *deviceLabel;
- u32 flags;
- u16 addrOfs;
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
} board_info[] = {
{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/P",
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent Integrated 10/100 TX UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
- { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
- { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
- { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/E",
- TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
- { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
-static void TLan_EisaProbe( void );
-static void TLan_Eisa_Cleanup( void );
-static int TLan_Init( struct net_device * );
-static int TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int TLan_Close( struct net_device *);
-static struct net_device_stats *TLan_GetStats( struct net_device *);
-static void TLan_SetMulticastList( struct net_device *);
-static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
- int irq, int rev, const struct pci_device_id *ent);
-static void TLan_tx_timeout( struct net_device *dev);
-static void TLan_tx_timeout_work(struct work_struct *work);
-static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32 TLan_HandleTxEOF( struct net_device *, u16 );
-static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32 TLan_HandleRxEOF( struct net_device *, u16 );
-static u32 TLan_HandleDummy( struct net_device *, u16 );
-static u32 TLan_HandleTxEOC( struct net_device *, u16 );
-static u32 TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32 TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void TLan_Timer( unsigned long );
-
-static void TLan_ResetLists( struct net_device * );
-static void TLan_FreeLists( struct net_device * );
-static void TLan_PrintDio( u16 );
-static void TLan_PrintList( TLanList *, char *, int );
-static void TLan_ReadAndClearStats( struct net_device *, int );
-static void TLan_ResetAdapter( struct net_device * );
-static void TLan_FinishReset( struct net_device * );
-static void TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void TLan_PhyPrint( struct net_device * );
-static void TLan_PhyDetect( struct net_device * );
-static void TLan_PhyPowerDown( struct net_device * );
-static void TLan_PhyPowerUp( struct net_device * );
-static void TLan_PhyReset( struct net_device * );
-static void TLan_PhyStartLink( struct net_device * );
-static void TLan_PhyFinishAutoNeg( struct net_device * );
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
-static void TLan_PhyMonitor( struct net_device * );
+static void tlan_phy_monitor(struct net_device *);
#endif
/*
-static int TLan_PhyNop( struct net_device * );
-static int TLan_PhyInternalCheck( struct net_device * );
-static int TLan_PhyInternalService( struct net_device * );
-static int TLan_PhyDp83840aCheck( struct net_device * );
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void TLan_MiiSendData( u16, u32, unsigned );
-static void TLan_MiiSync( u16 );
-static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
-static void TLan_EeSendStart( u16 );
-static int TLan_EeSendByte( u16, u8, int );
-static void TLan_EeReceiveByte( u16, u8 *, int );
-static int TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = addr;
@@ -358,7 +364,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
}
static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
{
unsigned long addr;
@@ -367,50 +373,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
return (struct sk_buff *) addr;
}
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
NULL,
- TLan_HandleTxEOF,
- TLan_HandleStatOverflow,
- TLan_HandleRxEOF,
- TLan_HandleDummy,
- TLan_HandleTxEOC,
- TLan_HandleStatusCheck,
- TLan_HandleRxEOC
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
};
static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function != NULL &&
- priv->timerType != TLAN_TIMER_ACTIVITY ) {
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = TLan_Timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
priv->timer.data = (unsigned long) dev;
- priv->timerSetAt = jiffies;
- priv->timerType = type;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
-} /* TLan_SetTimer */
+}
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
- These functions are more or less common to all Linux network drivers.
+These functions are more or less common to all Linux network drivers.
******************************************************************************
*****************************************************************************/
@@ -419,49 +425,117 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
- /***************************************************************
- * tlan_remove_one
- *
- * Returns:
- * Nothing
- * Parms:
- * None
- *
- * Goes through the TLanDevices list and frees the device
- * structs and memory associated with each device (lists
- * and buffers). It also ureserves the IO port regions
- * associated with this device.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ *	and buffers).  It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata( pdev );
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
- unregister_netdev( dev );
+ unregister_netdev(dev);
- if ( priv->dmaStorage ) {
- pci_free_consistent(priv->pciDev,
- priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
pci_release_regions(pdev);
#endif
- free_netdev( dev );
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
- pci_set_drvdata( pdev, NULL );
+ return 0;
}
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
};
static int __init tlan_probe(void)
@@ -482,13 +556,13 @@ static int __init tlan_probe(void)
}
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
- TLan_EisaProbe();
+ tlan_eisa_probe();
printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
- TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
- tlan_have_pci, tlan_have_eisa);
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
- if (TLanDevicesInstalled == 0) {
+ if (tlan_devices_installed == 0) {
rc = -ENODEV;
goto err_out_pci_unreg;
}
@@ -501,39 +575,39 @@ err_out_pci_free:
}
-static int __devinit tlan_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- return TLan_probe1( pdev, -1, -1, 0, ent);
+ return tlan_probe1(pdev, -1, -1, 0, ent);
}
/*
- ***************************************************************
- * tlan_probe1
- *
- * Returns:
- * 0 on success, error code on error
- * Parms:
- * none
- *
- * The name is lower case to fit in with all the rest of
- * the netcard_probe names. This function looks for
- * another TLan based adapter, setting it up with the
- * allocated device struct if one is found.
- * tlan_probe has been ported to the new net API and
- * now allocates its own device structure. This function
- * is also used by modules.
- *
- **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+* none
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
long ioaddr, int irq, int rev,
- const struct pci_device_id *ent )
+ const struct pci_device_id *ent)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
u16 device_id;
int reg, rc = -ENODEV;
@@ -543,7 +617,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
if (rc)
return rc;
- rc = pci_request_regions(pdev, TLanSignature);
+ rc = pci_request_regions(pdev, tlan_signature);
if (rc) {
printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
goto err_out;
@@ -551,7 +625,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
#endif /* CONFIG_PCI */
- dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
rc = -ENOMEM;
@@ -561,26 +635,28 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv = netdev_priv(dev);
- priv->pciDev = pdev;
+ priv->pci_dev = pdev;
priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
- u32 pci_io_base = 0;
+ u32 pci_io_base = 0;
priv->adapter = &board_info[ent->driver_data];
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+ printk(KERN_ERR
+ "TLAN: No suitable PCI mapping available.\n");
goto err_out_free_dev;
}
- for ( reg= 0; reg <= 5; reg ++ ) {
+ for (reg = 0; reg <= 5; reg++) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
- TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
- pci_io_base);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
break;
}
}
@@ -592,7 +668,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
- priv->adapterRev = pdev->revision;
+ priv->adapter_rev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
@@ -602,11 +678,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
device_id = inw(ioaddr + EISA_ID2);
priv->is_eisa = 1;
if (device_id == 0x20F1) {
- priv->adapter = &board_info[13]; /* NetFlex-3/E */
- priv->adapterRev = 23; /* TLAN 2.3 */
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
} else {
priv->adapter = &board_info[14];
- priv->adapterRev = 10; /* TLAN 1.0 */
+ priv->adapter_rev = 10; /* TLAN 1.0 */
}
dev->base_addr = ioaddr;
dev->irq = irq;
@@ -620,11 +696,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
: (dev->mem_start & 0x18) >> 3;
- if (priv->speed == 0x1) {
+ if (priv->speed == 0x1)
priv->speed = TLAN_SPEED_10;
- } else if (priv->speed == 0x2) {
+ else if (priv->speed == 0x2)
priv->speed = TLAN_SPEED_100;
- }
+
debug = priv->debug = dev->mem_end;
} else {
priv->aui = aui[boards_found];
@@ -635,11 +711,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
spin_lock_init(&priv->lock);
- rc = TLan_Init(dev);
+ rc = tlan_init(dev);
if (rc) {
printk(KERN_ERR "TLAN: Could not set up device.\n");
goto err_out_free_dev;
@@ -652,29 +728,29 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
- TLanDevicesInstalled++;
+ tlan_devices_installed++;
boards_found++;
/* pdev is NULL if this is an EISA device */
if (pdev)
tlan_have_pci++;
else {
- priv->nextDevice = TLan_Eisa_Devices;
- TLan_Eisa_Devices = dev;
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
tlan_have_eisa++;
}
printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
- dev->name,
- (int) dev->irq,
- (int) dev->base_addr,
- priv->adapter->deviceLabel,
- priv->adapterRev);
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
return 0;
err_out_uninit:
- pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -689,22 +765,23 @@ err_out:
}
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
- while( tlan_have_eisa ) {
- dev = TLan_Eisa_Devices;
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
priv = netdev_priv(dev);
- if (priv->dmaStorage) {
- pci_free_consistent(priv->pciDev, priv->dmaSize,
- priv->dmaStorage, priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
- release_region( dev->base_addr, 0x10);
- unregister_netdev( dev );
- TLan_Eisa_Devices = priv->nextDevice;
- free_netdev( dev );
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
tlan_have_eisa--;
}
}
@@ -715,7 +792,7 @@ static void __exit tlan_exit(void)
pci_unregister_driver(&tlan_driver);
if (tlan_have_eisa)
- TLan_Eisa_Cleanup();
+ tlan_eisa_cleanup();
}
@@ -726,24 +803,24 @@ module_exit(tlan_exit);
- /**************************************************************
- * TLan_EisaProbe
- *
- * Returns: 0 on success, 1 otherwise
- *
- * Parms: None
- *
- *
- * This functions probes for EISA devices and calls
- * TLan_probe1 when one is found.
- *
- *************************************************************/
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * Parms: None
+ *
+ *
+ *	This function probes for EISA devices and calls
+ * TLan_probe1 when one is found.
+ *
+ *************************************************************/
-static void __init TLan_EisaProbe (void)
+static void __init tlan_eisa_probe(void)
{
- long ioaddr;
- int rc = -ENODEV;
- int irq;
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
u16 device_id;
if (!EISA_bus) {
@@ -754,15 +831,16 @@ static void __init TLan_EisaProbe (void)
/* Loop through all slots of the EISA bus */
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
- TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
- (int) ioaddr);
- if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
goto out;
if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +850,326 @@ static void __init TLan_EisaProbe (void)
device_id = inw(ioaddr + EISA_ID2);
if (device_id != 0x20F1 && device_id != 0x40F1) {
- release_region (ioaddr, 0x10);
+ release_region(ioaddr, 0x10);
goto out;
}
- if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */
- release_region (ioaddr, 0x10);
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
goto out2;
}
if (debug == 0x10)
- printk("Found one\n");
+ printk(KERN_INFO "Found one\n");
/* Get irq from board */
- switch (inb(ioaddr + 0xCC0)) {
- case(0x10):
- irq=5;
- break;
- case(0x20):
- irq=9;
- break;
- case(0x40):
- irq=10;
- break;
- case(0x80):
- irq=11;
- break;
- default:
- goto out;
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
}
/* Setup the newly found eisa adapter */
- rc = TLan_probe1( NULL, ioaddr, irq,
- 12, NULL);
+ rc = tlan_probe1(NULL, ioaddr, irq,
+ 12, NULL);
continue;
- out:
- if (debug == 0x10)
- printk("None found\n");
- continue;
+out:
+ if (debug == 0x10)
+ printk(KERN_INFO "None found\n");
+ continue;
- out2: if (debug == 0x10)
- printk("Card found but it is not enabled, skipping\n");
- continue;
+out2:
+ if (debug == 0x10)
+ printk(KERN_INFO "Card found but it is not enabled, skipping\n");
+ continue;
}
-} /* TLan_EisaProbe */
+}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
{
disable_irq(dev->irq);
- TLan_HandleInterrupt(dev->irq, dev);
+ tlan_handle_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
-static const struct net_device_ops TLan_netdev_ops = {
- .ndo_open = TLan_Open,
- .ndo_stop = TLan_Close,
- .ndo_start_xmit = TLan_StartTx,
- .ndo_tx_timeout = TLan_tx_timeout,
- .ndo_get_stats = TLan_GetStats,
- .ndo_set_multicast_list = TLan_SetMulticastList,
- .ndo_do_ioctl = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = TLan_Poll,
+ .ndo_poll_controller = tlan_poll,
#endif
};
- /***************************************************************
- * TLan_Init
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev The structure of the device to be
- * init'ed.
- *
- * This function completes the initialization of the
- * device structure and driver. It reserves the IO
- * addresses, allocates memory for the lists and bounce
- * buffers, retrieves the MAC address from the eeprom
- * and assignes the device's methods.
- *
- **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ *	and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
{
int dma_size;
- int err;
+ int err;
int i;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
priv = netdev_priv(dev);
- dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
- * ( sizeof(TLanList) );
- priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
- dma_size, &priv->dmaStorageDMA);
- priv->dmaSize = dma_size;
-
- if ( priv->dmaStorage == NULL ) {
- printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
- dev->name );
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ printk(KERN_ERR
+ "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name);
return -ENOMEM;
}
- memset( priv->dmaStorage, 0, dma_size );
- priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
- priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
- priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
- priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
err = 0;
- for ( i = 0; i < 6 ; i++ )
- err |= TLan_EeReadByte( dev,
- (u8) priv->adapter->addrOfs + i,
- (u8 *) &dev->dev_addr[i] );
- if ( err ) {
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
- dev->name,
- err );
+ dev->name,
+ err);
}
dev->addr_len = 6;
netif_carrier_off(dev);
/* Device methods */
- dev->netdev_ops = &TLan_netdev_ops;
+ dev->netdev_ops = &tlan_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
-} /* TLan_Init */
+}
- /***************************************************************
- * TLan_Open
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev Structure of device to be opened.
- *
- * This routine puts the driver and TLAN adapter in a
- * state where it is ready to send and receive packets.
- * It allocates the IRQ, resets and brings the adapter
- * out of reset, and allows interrupts. It also delays
- * the startup for autonegotiation or sends a Rx GO
- * command to the adapter, as appropriate.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int err;
- priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
- err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
- dev->name, dev );
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
- if ( err ) {
+ if (err) {
pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
- dev->name, dev->irq );
+ dev->name, dev->irq);
return err;
}
init_timer(&priv->timer);
- netif_start_queue(dev);
- /* NOTE: It might not be necessary to read the stats before a
- reset if you don't care what the values are.
- */
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_start(dev);
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
- dev->name, priv->tlanRev );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
return 0;
-} /* TLan_Open */
+}
- /**************************************************************
- * TLan_ioctl
- *
- * Returns:
- * 0 on success, error code otherwise
- * Params:
- * dev structure of device to receive ioctl.
- *
- * rq ifreq structure to hold userspace data.
- *
- * cmd ioctl command.
- *
- *
- *************************************************************/
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
- u32 phy = priv->phy[priv->phyNum];
+ u32 phy = priv->phy[priv->phy_num];
- if (!priv->phyOnline)
+ if (!priv->phy_online)
return -EAGAIN;
- switch(cmd) {
- case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = phy;
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
- case SIOCGMIIREG: /* Read MII PHY register. */
- TLan_MiiReadReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, &data->val_out);
- return 0;
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
- case SIOCSMIIREG: /* Write MII PHY register. */
- TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-} /* tlan_ioctl */
+}
- /***************************************************************
- * TLan_tx_timeout
- *
- * Returns: nothing
- *
- * Params:
- * dev structure of device which timed out
- * during transmit.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
{
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
/* Ok so we timed out, lets see what we can do about it...*/
- TLan_FreeLists( dev );
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue( dev );
+ netif_wake_queue(dev);
}
- /***************************************************************
- * TLan_tx_timeout_work
- *
- * Returns: nothing
- *
- * Params:
- * work work item of device which timed out
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
{
- TLanPrivateInfo *priv =
- container_of(work, TLanPrivateInfo, tlan_tqueue);
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
- TLan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev);
}
- /***************************************************************
- * TLan_StartTx
- *
- * Returns:
- * 0 on success, non-zero on failure.
- * Parms:
- * skb A pointer to the sk_buff containing the
- * frame to be sent.
- * dev The device to send the data on.
- *
- * This function adds a frame to the Tx list to be sent
- * ASAP. First it verifies that the adapter is ready and
- * there is room in the queue. Then it sets up the next
- * available list, copies the frame to the corresponding
- * buffer. If the adapter Tx channel is idle, it gives
- * the adapter a Tx Go command on the list, otherwise it
- * sets the forward address of the previous list to point
- * to this one. Then it frees the sk_buff.
- *
- **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t tail_list_phys;
- TLanList *tail_list;
+ struct tlan_list *tail_list;
unsigned long flags;
unsigned int txlen;
- if ( ! priv->phyOnline ) {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
- dev->name );
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1100,218 +1178,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
return NETDEV_TX_OK;
txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
- tail_list = priv->txList + priv->txTail;
- tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
- if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
- dev->name, priv->txHead, priv->txTail );
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
netif_stop_queue(dev);
- priv->txBusyCount++;
+ priv->tx_busy_count++;
return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data, txlen,
PCI_DMA_TODEVICE);
- TLan_StoreSKB(tail_list, skb);
+ tlan_store_skb(tail_list, skb);
- tail_list->frameSize = (u16) txlen;
+ tail_list->frame_size = (u16) txlen;
tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
spin_lock_irqsave(&priv->lock, flags);
- tail_list->cStat = TLAN_CSTAT_READY;
- if ( ! priv->txInProgress ) {
- priv->txInProgress = 1;
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
- outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
} else {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
- priv->txTail );
- if ( priv->txTail == 0 ) {
- ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
= tail_list_phys;
} else {
- ( priv->txList + ( priv->txTail - 1 ) )->forward
+ (priv->tx_list + (priv->tx_tail - 1))->forward
= tail_list_phys;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
- CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
return NETDEV_TX_OK;
-} /* TLan_StartTx */
+}
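
Both ring indices in this path advance with the driver's CIRC_INC() helper (used just above on priv->tx_tail). As a standalone sketch of the wrap-around behaviour — the macro body and the ring size here are assumptions, not copied from tlan.h:

#include <stdio.h>

#define TLAN_NUM_TX_LISTS 16	/* illustrative ring size, not the driver's value */
#define CIRC_INC(idx, size) do { if (++(idx) >= (size)) (idx) = 0; } while (0)

int main(void)
{
	int tail = TLAN_NUM_TX_LISTS - 2;
	int i;

	/* advancing past the last slot wraps back to slot 0 */
	for (i = 0; i < 4; i++) {
		printf("tail = %d\n", tail);
		CIRC_INC(tail, TLAN_NUM_TX_LISTS);
	}
	return 0;
}
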
- /***************************************************************
- * TLan_HandleInterrupt
- *
- * Returns:
- * Nothing
- * Parms:
- * irq The line on which the interrupt
- * occurred.
- * dev_id A pointer to the device assigned to
- * this irq line.
- *
- * This function handles an interrupt generated by its
- * assigned TLAN adapter. The function deactivates
- * interrupts on its adapter, records the type of
- * interrupt, executes the appropriate subhandler, and
- * acknowdges the interrupt to the adapter (thus
- * re-enabling adapter interrupts.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 host_int;
u16 type;
spin_lock(&priv->lock);
- host_int = inw( dev->base_addr + TLAN_HOST_INT );
- type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
- if ( type ) {
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
u32 ack;
u32 host_cmd;
- outw( host_int, dev->base_addr + TLAN_HOST_INT );
- ack = TLanIntVector[type]( dev, host_int );
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
- if ( ack ) {
- host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
- outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
}
}
spin_unlock(&priv->lock);
return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
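
tlan_handle_interrupt() turns the HOST_INT type field into an index into tlan_int_vector[], whose entries are the subhandlers that follow (Tx EOF, stat overflow, Rx EOF, and so on). A rough, self-contained sketch of that table-dispatch pattern — the typedef, the table contents and the mask value are illustrative, not the driver's exact declarations:

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned short u16;

struct net_device;			/* opaque stand-in for the kernel type */

typedef u32 (*tlan_int_handler)(struct net_device *dev, u16 host_int);

static u32 handle_invalid(struct net_device *dev, u16 host_int)
{
	(void)dev; (void)host_int;
	return 0;
}

static u32 handle_tx_eof(struct net_device *dev, u16 host_int)
{
	(void)dev; (void)host_int;
	return 1;			/* "ack one frame" */
}

static u32 handle_rx_eof(struct net_device *dev, u16 host_int)
{
	(void)dev; (void)host_int;
	return 1;
}

/* index 0 means "no interrupt"; the rest map type codes to subhandlers */
static const tlan_int_handler int_vector[] = {
	handle_invalid,
	handle_tx_eof,
	handle_rx_eof,
};

int main(void)
{
	u16 host_int = 0x0008;			/* made-up HOST_INT contents */
	u16 type = (host_int >> 2) & 0x7f;	/* type field; mask here is illustrative */
	u32 ack = 0;

	if (type < sizeof(int_vector) / sizeof(int_vector[0]))
		ack = int_vector[type](NULL, host_int);
	printf("type=%u ack=%u\n", (unsigned)type, (unsigned)ack);
	return 0;
}
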
- /***************************************************************
- * TLan_Close
- *
- * Returns:
- * An error code.
- * Parms:
- * dev The device structure of the device to
- * close.
- *
- * This function shuts down the adapter. It records any
- * stats, puts the adapter into reset state, deactivates
- * its time as needed, and frees the irq it is using.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
priv->neg_be_verbose = 0;
+ tlan_stop(dev);
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
- if ( priv->timer.function != NULL ) {
- del_timer_sync( &priv->timer );
- priv->timer.function = NULL;
- }
-
- free_irq( dev->irq, dev );
- TLan_FreeLists( dev );
- TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
return 0;
-} /* TLan_Close */
+}
- /***************************************************************
- * TLan_GetStats
- *
- * Returns:
- * A pointer to the device's statistics structure.
- * Parms:
- * dev The device structure to return the
- * stats for.
- *
- * This function updates the devices statistics by reading
- * the TLAN chip's onboard registers. Then it returns the
- * address of the statistics structure.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the devices statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
/* Should only read stats if open ? */
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
- priv->rxEocCount );
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
- priv->txBusyCount );
- if ( debug & TLAN_DEBUG_GNRL ) {
- TLan_PrintDio( dev->base_addr );
- TLan_PhyPrint( dev );
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
}
- if ( debug & TLAN_DEBUG_LIST ) {
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
- TLan_PrintList( priv->rxList + i, "RX", i );
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
- TLan_PrintList( priv->txList + i, "TX", i );
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
}
return &dev->stats;
-} /* TLan_GetStats */
+}
- /***************************************************************
- * TLan_SetMulticastList
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure to set the
- * multicast list for.
- *
- * This function sets the TLAN adaptor to various receive
- * modes. If the IFF_PROMISC flag is set, promiscuous
- * mode is acitviated. Otherwise, promiscuous mode is
- * turned off. If the IFF_ALLMULTI flag is set, then
- * the hash table is set to receive all group addresses.
- * Otherwise, the first three multicast addresses are
- * stored in AREG_1-3, and the rest are selected via the
- * hash table, as necessary.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 hash1 = 0;
@@ -1320,53 +1394,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
u32 offset;
u8 tmp;
- if ( dev->flags & IFF_PROMISC ) {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
} else {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
- if ( dev->flags & IFF_ALLMULTI ) {
- for ( i = 0; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
} else {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- if ( i < 3 ) {
- TLan_SetMac( dev, i + 1,
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
(char *) &ha->addr);
} else {
- offset = TLan_HashFunc((u8 *)&ha->addr);
- if ( offset < 32 )
- hash1 |= ( 1 << offset );
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
else
- hash2 |= ( 1 << ( offset - 32 ) );
+ hash2 |= (1 << (offset - 32));
}
i++;
}
- for ( ; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
}
}
-} /* TLan_SetMulticastList */
+}
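
Multicast addresses beyond the three AREG slots are filtered through a 64-bit hash split across TLAN_HASH_1 and TLAN_HASH_2: tlan_hash_func() reduces each address to a bit index 0-63 and the matching bit is set. The sketch below shows only that bit bookkeeping; the XOR fold stands in for the chip's real hash function, which is not reproduced here:

#include <stdint.h>
#include <stdio.h>

/* Stand-in hash: fold the 6-byte MAC into a 6-bit index (0..63).  The real
 * chip defines its own hash function; only the bit bookkeeping matters here. */
static unsigned int toy_hash(const uint8_t *mac)
{
	unsigned int h = 0;
	int i;

	for (i = 0; i < 6; i++)
		h ^= mac[i];
	return h & 0x3f;
}

int main(void)
{
	uint32_t hash1 = 0, hash2 = 0;
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned int offset = toy_hash(mc);

	if (offset < 32)
		hash1 |= 1u << offset;		/* low 32 bits -> HASH_1 */
	else
		hash2 |= 1u << (offset - 32);	/* high 32 bits -> HASH_2 */

	printf("offset=%u HASH_1=0x%08x HASH_2=0x%08x\n",
	       offset, (unsigned)hash1, (unsigned)hash2);
	return 0;
}
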
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
- Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
- Programmer's Guide" for more informations on handling interrupts
- generated by TLAN based adapters.
+Please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN-based adapters.
******************************************************************************
*****************************************************************************/
@@ -1374,46 +1451,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
- /***************************************************************
- * TLan_HandleTxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Tx EOF interrupts which are raised
- * by the adapter when it has completed sending the
- * contents of a buffer. If detemines which list/buffer
- * was completed and resets it. If the buffer was the last
- * in the channel (EOC), then the function checks to see if
- * another buffer is ready to send, and if so, sends a Tx
- * Go command. Finally, the driver activates/continues the
- * activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 0;
- u16 tmpCStat;
+ u16 tmp_c_stat;
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
- while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- struct sk_buff *skb = TLan_GetSKB(head_list);
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
@@ -1421,304 +1500,311 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
- if ( tmpCStat & TLAN_CSTAT_EOC )
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
- dev->stats.tx_bytes += head_list->frameSize;
+ dev->stats.tx_bytes += head_list->frame_size;
- head_list->cStat = TLAN_CSTAT_UNUSED;
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
netif_start_queue(dev);
- CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
- head_list = priv->txList + priv->txHead;
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted TX frame.\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if (head_list->c_stat & TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
- priv->timer.data = (unsigned long) dev;
- priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
- add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleTxEOF */
+}
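
tlan_store_skb()/tlan_get_skb() keep the sk_buff pointer with the list element — the zeroing of buffer[8] and buffer[9] right after the unmap suggests those two spare slots are where it lives. A standalone illustration of such a pointer-stash trick; the struct layout and helper names below are assumptions, not the tlan.h inlines:

#include <stdint.h>
#include <stdio.h>

struct toy_buf  { uint32_t count; uint32_t address; };
struct toy_list { struct toy_buf buffer[10]; };

/* split a host pointer across two 32-bit slots the DMA engine never looks at */
static void stash_ptr(struct toy_list *l, void *p)
{
	uint64_t addr = (uint64_t)(uintptr_t)p;

	l->buffer[9].address = (uint32_t)addr;
	l->buffer[8].address = (uint32_t)(addr >> 32);
}

static void *fetch_ptr(const struct toy_list *l)
{
	uint64_t addr = ((uint64_t)l->buffer[8].address << 32) |
			l->buffer[9].address;

	return (void *)(uintptr_t)addr;
}

int main(void)
{
	static struct toy_list list;
	int payload = 42;

	stash_ptr(&list, &payload);
	printf("round-trip ok: %d\n", *(int *)fetch_ptr(&list));
	return 0;
}
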
- /***************************************************************
- * TLan_HandleStatOverflow
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Statistics Overflow interrupt
- * which means that one or more of the TLAN statistics
- * registers has reached 1/2 capacity and needs to be read.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
return 1;
-} /* TLan_HandleStatOverflow */
-
-
-
-
- /***************************************************************
- * TLan_HandleRxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Rx EOF interrupt which
- * indicates a frame has been received by the adapter from
- * the net and the frame has been transferred to memory.
- * The function determines the bounce buffer the frame has
- * been loaded into, creates a new sk_buff big enough to
- * hold the frame, and sends it to protocol stack. It
- * then resets the used buffer and appends it to the end
- * of the list. If the frame was the last in the Rx
- * channel (EOC), the function restarts the receive channel
- * by sending an Rx Go command to the adapter. Then it
- * activates/continues the activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack = 0;
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
struct sk_buff *skb;
- TLanList *tail_list;
- u16 tmpCStat;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
dma_addr_t head_list_phys;
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
- while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- dma_addr_t frameDma = head_list->buffer[0].address;
- u32 frameSize = head_list->frameSize;
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
struct sk_buff *new_skb;
ack++;
- if (tmpCStat & TLAN_CSTAT_EOC)
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
new_skb = netdev_alloc_skb_ip_align(dev,
TLAN_MAX_FRAME_SIZE + 5);
- if ( !new_skb )
+ if (!new_skb)
goto drop_and_reuse;
- skb = TLan_GetSKB(head_list);
- pci_unmap_single(priv->pciDev, frameDma,
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- skb_put( skb, frameSize );
+ skb_put(skb, frame_size);
- dev->stats.rx_bytes += frameSize;
+ dev->stats.rx_bytes += frame_size;
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
- head_list->buffer[0].address = pci_map_single(priv->pciDev,
- new_skb->data,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(head_list, new_skb);
+ tlan_store_skb(head_list, new_skb);
drop_and_reuse:
head_list->forward = 0;
- head_list->cStat = 0;
- tail_list = priv->rxList + priv->rxTail;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
tail_list->forward = head_list_phys;
- CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
- CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted RX frame.\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleRxEOF */
+}
- /***************************************************************
- * TLan_HandleDummy
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Dummy interrupt, which is
- * raised whenever a test interrupt is generated by setting
- * the Req_Int bit of HOST_CMD to 1.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
- printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ pr_info("TLAN: Test interrupt on %s.\n", dev->name);
return 1;
-} /* TLan_HandleDummy */
+}
- /***************************************************************
- * TLan_HandleTxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Tx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * functionality, so process EOC events if this is the
- * case.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
- TLanList *head_list;
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 1;
host_int = 0;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if (head_list->c_stat & TLAN_CSTAT_READY) {
netif_stop_queue(dev);
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
return ack;
-} /* TLan_HandleTxEOC */
+}
- /***************************************************************
- * TLan_HandleStatusCheck
- *
- * Returns:
- * 0 if Adapter check, 1 if Network Status check.
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Adapter Check/Network Status
- * interrupts generated by the adapter. It checks the
- * vector in the HOST_INT register to determine if it is
- * an Adapter Check interrupt. If so, it resets the
- * adapter. Otherwise it clears the status registers
- * and services the PHY.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack;
u32 error;
u8 net_sts;
@@ -1727,92 +1813,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
u16 tlphy_sts;
ack = 1;
- if ( host_int & TLAN_HI_IV_MASK ) {
- netif_stop_queue( dev );
- error = inl( dev->base_addr + TLAN_CH_PARM );
- printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
schedule_work(&priv->tlan_tqueue);
netif_wake_queue(dev);
ack = 0;
} else {
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
- phy = priv->phy[priv->phyNum];
-
- net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
- if ( net_sts ) {
- TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
- dev->name, (unsigned) net_sts );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
}
- if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
- ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl |= TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
- ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl &= ~TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- }
-
- if (debug) {
- TLan_PhyPrint( dev );
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
+
+ if (debug)
+ tlan_phy_print(dev);
}
}
return ack;
-} /* TLan_HandleStatusCheck */
+}
- /***************************************************************
- * TLan_HandleRxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Rx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * CSTAT member or a INTDIS register, so if this chip is
- * pre-3.0, process EOC interrupts normally.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t head_list_phys;
u32 ack = 1;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->rxHead, priv->rxTail );
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
return ack;
-} /* TLan_HandleRxEOC */
+}
@@ -1820,98 +1908,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_Timer
- *
- * Returns:
- * Nothing
- * Parms:
- * data A value given to add timer when
- * add_timer was called.
- *
- * This function handles timed functionality for the
- * TLAN driver. The two current timer uses are for
- * delaying for autonegotionation and driving the ACT LED.
- * - Autonegotiation requires being allowed about
- * 2 1/2 seconds before attempting to transmit a
- * packet. It would be a very bad thing to hang
- * the kernel this long, so the driver doesn't
- * allow transmission 'til after this time, for
- * certain PHYs. It would be much nicer if all
- * PHYs were interrupt-capable like the internal
- * PHY.
- * - The ACT LED, which shows adapter activity, is
- * driven by the driver, and so must be left on
- * for a short period to power up the LED so it
- * can be seen. This delay can be changed by
- * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
- * if desired. 100 ms produces a slightly
- * sluggish response.
- *
- **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 elapsed;
unsigned long flags = 0;
priv->timer.function = NULL;
- switch ( priv->timerType ) {
+ switch (priv->timer_type) {
#ifdef MONITOR
- case TLAN_TIMER_LINK_BEAT:
- TLan_PhyMonitor( dev );
- break;
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
#endif
- case TLAN_TIMER_PHY_PDOWN:
- TLan_PhyPowerDown( dev );
- break;
- case TLAN_TIMER_PHY_PUP:
- TLan_PhyPowerUp( dev );
- break;
- case TLAN_TIMER_PHY_RESET:
- TLan_PhyReset( dev );
- break;
- case TLAN_TIMER_PHY_START_LINK:
- TLan_PhyStartLink( dev );
- break;
- case TLAN_TIMER_PHY_FINISH_AN:
- TLan_PhyFinishAutoNeg( dev );
- break;
- case TLAN_TIMER_FINISH_RESET:
- TLan_FinishReset( dev );
- break;
- case TLAN_TIMER_ACTIVITY:
- spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function == NULL ) {
- elapsed = jiffies - priv->timerSetAt;
- if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK );
- } else {
- priv->timer.function = TLan_Timer;
- priv->timer.expires = priv->timerSetAt
- + TLAN_TIMER_ACT_DELAY;
- spin_unlock_irqrestore(&priv->lock, flags);
- add_timer( &priv->timer );
- break;
- }
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
}
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- default:
- break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
}
-} /* TLan_Timer */
+}
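
Every case here shares the single timer_list embedded in priv, multiplexed by priv->timer_type, so only one delayed action can be outstanding and priv->timer.function doubles as the "pending" flag. A rough user-space sketch of that single-timer multiplexing — the names, fields and pending test are illustrative:

#include <stdio.h>

enum timer_type { TIMER_NONE, TIMER_ACTIVITY, TIMER_PHY_FINISH_AN };

struct toy_priv {
	enum timer_type timer_type;	/* which delayed action is queued */
	unsigned long expires;		/* absolute "jiffies" deadline */
};

/* arm the shared timer only if nothing is pending */
static int set_timer(struct toy_priv *p, unsigned long now,
		     unsigned long delay, enum timer_type type)
{
	if (p->timer_type != TIMER_NONE)
		return 0;		/* another delayed action already queued */
	p->timer_type = type;
	p->expires = now + delay;
	return 1;
}

int main(void)
{
	struct toy_priv priv = { TIMER_NONE, 0 };

	printf("armed: %d\n", set_timer(&priv, 100, 10, TIMER_ACTIVITY));
	printf("armed: %d\n", set_timer(&priv, 101, 50, TIMER_PHY_FINISH_AN));
	return 0;
}
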
@@ -1919,39 +2007,39 @@ static void TLan_Timer( unsigned long data )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_ResetLists
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure with the list
- * stuctures to be reset.
- *
- * This routine sets the variables associated with managing
- * the TLAN lists to their initial values.
- *
- **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ * structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
dma_addr_t list_phys;
struct sk_buff *skb;
- priv->txHead = 0;
- priv->txTail = 0;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- list->cStat = TLAN_CSTAT_UNUSED;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
list->buffer[0].address = 0;
list->buffer[2].count = 0;
list->buffer[2].address = 0;
@@ -1959,169 +2047,169 @@ static void TLan_ResetLists( struct net_device *dev )
list->buffer[9].address = 0;
}
- priv->rxHead = 0;
- priv->rxTail = TLAN_NUM_RX_LISTS - 1;
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- list_phys = priv->rxListDMA + sizeof(TLanList) * i;
- list->cStat = TLAN_CSTAT_READY;
- list->frameSize = TLAN_MAX_FRAME_SIZE;
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if ( !skb ) {
- pr_err("TLAN: out of memory for received data.\n" );
+ if (!skb) {
+ pr_err("TLAN: out of memory for received data.\n");
break;
}
- list->buffer[0].address = pci_map_single(priv->pciDev,
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(list, skb);
+ tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
- list->forward = list_phys + sizeof(TLanList);
+ list->forward = list_phys + sizeof(struct tlan_list);
}
/* in case ran out of memory early, clear bits */
while (i < TLAN_NUM_RX_LISTS) {
- TLan_StoreSKB(priv->rxList + i, NULL);
+ tlan_store_skb(priv->rx_list + i, NULL);
++i;
}
list->forward = 0;
-} /* TLan_ResetLists */
+}
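
The Rx setup above chains the descriptors through their forward fields — each one holds the DMA address of the next struct tlan_list — and the final forward of 0 terminates the chain for the adapter. A minimal sketch of building such a forward-linked ring, with plain offsets standing in for DMA addresses and made-up CSTAT/frame-size values:

#include <stdint.h>
#include <stdio.h>

#define NUM_LISTS 4			/* illustrative ring size */

struct toy_list {
	uint32_t forward;		/* "DMA address" of the next element, 0 = end */
	uint16_t c_stat;
	uint16_t frame_size;
};

int main(void)
{
	struct toy_list ring[NUM_LISTS];
	uint32_t base = 0x1000;		/* fake DMA base address */
	int i;

	for (i = 0; i < NUM_LISTS; i++) {
		uint32_t phys = base + sizeof(struct toy_list) * i;

		ring[i].c_stat = 0x3000;	/* "ready"; value is made up */
		ring[i].frame_size = 1536;
		ring[i].forward = phys + sizeof(struct toy_list);
	}
	ring[NUM_LISTS - 1].forward = 0;	/* terminate the chain */

	for (i = 0; i < NUM_LISTS; i++)
		printf("list %d -> 0x%08x\n", i, (unsigned)ring[i].forward);
	return 0;
}
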
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
struct sk_buff *skb;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
pci_unmap_single(
- priv->pciDev,
+ priv->pci_dev,
list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
- pci_unmap_single(priv->pciDev,
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
list->buffer[0].address,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
-} /* TLan_FreeLists */
+}
- /***************************************************************
- * TLan_PrintDio
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base Base IO port of the device of
- * which to print DIO registers.
- *
- * This function prints out all the internal (DIO)
- * registers of a TLAN chip.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
{
u32 data0, data1;
int i;
- printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
- io_base );
- printk( "TLAN: Off. +0 +4\n" );
- for ( i = 0; i < 0x4C; i+= 8 ) {
- data0 = TLan_DioRead32( io_base, i );
- data1 = TLan_DioRead32( io_base, i + 0x4 );
- printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n",
+ io_base);
+ pr_info("TLAN: Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1);
}
-} /* TLan_PrintDio */
+}
- /***************************************************************
- * TLan_PrintList
- *
- * Returns:
- * Nothing
- * Parms:
- * list A pointer to the TLanList structure to
- * be printed.
- * type A string to designate type of list,
- * "Rx" or "Tx".
- * num The index of the list.
- *
- * This function prints out the contents of the list
- * pointed to by the list parameter.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
int i;
- printk( "TLAN: %s List %d at %p\n", type, num, list );
- printk( "TLAN: Forward = 0x%08x\n", list->forward );
- printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
- printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
- /* for ( i = 0; i < 10; i++ ) { */
- for ( i = 0; i < 2; i++ ) {
- printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
- i, list->buffer[i].count, list->buffer[i].address );
+ pr_info("TLAN: %s List %d at %p\n", type, num, list);
+ pr_info("TLAN: Forward = 0x%08x\n", list->forward);
+ pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
}
-} /* TLan_PrintList */
+}
- /***************************************************************
- * TLan_ReadAndClearStats
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * to which to read stats.
- * record Flag indicating whether to add
- *
- * This functions reads all the internal status registers
- * of the TLAN chip, which clears them as a side effect.
- * It then either adds the values to the device's status
- * struct, or discards them, depending on whether record
- * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
- *
- **************************************************************/
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * from which to read stats.
+ * record Flag indicating whether to record the values read.
+ *
+ * This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
u32 tx_good, tx_under;
u32 rx_good, rx_over;
@@ -2129,41 +2217,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
u32 multi_col, single_col;
u32 excess_col, late_col, loss;
- outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
- def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
- def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
- multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
- outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
- late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
- loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
- if ( record ) {
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
dev->stats.rx_packets += rx_good;
dev->stats.rx_errors += rx_over + crc + code;
dev->stats.tx_packets += tx_good;
dev->stats.tx_errors += tx_under + loss;
- dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
dev->stats.rx_over_errors += rx_over;
dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2262,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
dev->stats.tx_carrier_errors += loss;
}
-} /* TLan_ReadAndClearStats */
+}
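
Each statistics register is pulled through the DIO window a byte at a time: the low three bytes form a 24-bit frame counter and the top byte carries a second, smaller counter (underruns or overruns in the cases above). A self-contained sketch of that little-endian reassembly — the register image and helper are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

/* pretend DIO data window: byte i of the currently selected statistics register */
static const uint8_t dio_data[4] = { 0x34, 0x12, 0x00, 0x02 };

static uint8_t toy_inb(unsigned int off)
{
	return dio_data[off & 3];
}

int main(void)
{
	/* low three bytes form a 24-bit counter, the top byte a second counter */
	uint32_t good = toy_inb(0) | (toy_inb(1) << 8) | (toy_inb(2) << 16);
	uint32_t under = toy_inb(3);

	printf("good frames = %u, underruns = %u\n", (unsigned)good, (unsigned)under);
	return 0;
}
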
- /***************************************************************
- * TLan_Reset
- *
- * Returns:
- * 0
- * Parms:
- * dev Pointer to device structure of adapter
- * to be reset.
- *
- * This function resets the adapter and it's physical
- * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
- * Programmer's Guide" for details. The routine tries to
- * implement what is detailed there, though adjustments
- * have been made.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
u32 addr;
u32 data;
u8 data8;
- priv->tlanFullDuplex = false;
- priv->phyOnline=0;
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
netif_carrier_off(dev);
/* 1. Assert reset bit. */
@@ -2216,7 +2305,7 @@ TLan_ResetAdapter( struct net_device *dev )
udelay(1000);
-/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+/* 2. Turn off interrupts. (Probably isn't necessary) */
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2313,208 @@ TLan_ResetAdapter( struct net_device *dev )
/* 3. Clear AREGs and HASHs. */
- for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
- TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
- }
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
/* 4. Setup NetConfig register. */
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
- outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
- outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
- outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
/* 7. Setup the remaining registers. */
- if ( priv->tlanRev >= 0x30 ) {
+ if (priv->tlan_rev >= 0x30) {
data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
- TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
}
- TLan_PhyDetect( dev );
+ tlan_phy_detect(dev);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
- if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
data |= TLAN_NET_CFG_BIT;
- if ( priv->aui == 1 ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
- } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = true;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
} else {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
}
}
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_CFG_PHY_EN;
- }
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- TLan_FinishReset( dev );
- } else {
- TLan_PhyPowerDown( dev );
- }
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
-} /* TLan_ResetAdapter */
+}
static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u8 data;
u32 phy;
u8 sio;
u16 status;
u16 partner;
u16 tlphy_ctl;
- u16 tlphy_par;
+ u16 tlphy_par;
u16 tlphy_id1, tlphy_id2;
- int i;
+ int i;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
- if ( priv->tlanFullDuplex ) {
+ if (priv->tlan_full_duplex)
data |= TLAN_NET_CMD_DUPLEX;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_MASK_MASK7;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
- TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
- if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
- ( priv->aui ) ) {
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
status = MII_GS_LINK;
- printk( "TLAN: %s: Link forced.\n", dev->name );
+ pr_info("TLAN: %s: Link forced.\n", dev->name);
} else {
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- if ( (status & MII_GS_LINK) &&
- /* We only support link info on Nat.Sem. PHY's */
- (tlphy_id1 == NAT_SEM_ID1) &&
- (tlphy_id2 == NAT_SEM_ID2) ) {
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
- printk( "TLAN: %s: Link active with ", dev->name );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat. Sem. PHYs */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ pr_info("TLAN: %s: Link active with ", dev->name);
if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
- printk( "forced 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+ pr_info("forced 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
} else {
- printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
- printk("TLAN: Partner capability: ");
- for (i = 5; i <= 10; i++)
- if (partner & (1<<i))
- printk("%s",media[i-5]);
+ pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+ pr_info("TLAN: Partner capability: ");
+ for (i = 5; i <= 10; i++)
+ if (partner & (1<<i))
+ printk("%s", media[i-5]);
printk("\n");
}
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
#ifdef MONITOR
/* We have link beat..for now anyway */
- priv->link = 1;
- /*Enabling link beat monitoring */
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+ priv->link = 1;
+ /* Enabling link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
} else if (status & MII_GS_LINK) {
- printk( "TLAN: %s: Link active\n", dev->name );
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ pr_info("TLAN: %s: Link active\n", dev->name);
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
}
}
- if ( priv->phyNum == 0 ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- tlphy_ctl |= TLAN_TC_INTEN;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
- sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
- sio |= TLAN_NET_SIO_MINTEN;
- TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
- }
-
- if ( status & MII_GS_LINK ) {
- TLan_SetMac( dev, 0, dev->dev_addr );
- priv->phyOnline = 1;
- outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
- outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- }
- outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
netif_carrier_on(dev);
} else {
- printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
- dev->name );
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+ pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n",
+ dev->name);
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
return;
}
- TLan_SetMulticastList(dev);
+ tlan_set_multicast_list(dev);
-} /* TLan_FinishReset */
+}
- /***************************************************************
- * TLan_SetMac
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * on which to change the AREG.
- * areg The AREG to set the address in (0 - 3).
- * mac A pointer to an array of chars. Each
- * element stores one byte of the address.
- * IE, it isn't in ascii.
- *
- * This function transfers a MAC address to one of the
- * TLAN AREGs (address registers). The TLAN chip locks
- * the register on writing to offset 0 and unlocks the
- * register after writing to offset 5. If NULL is passed
- * in mac, then the AREG is filled with 0's.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * I.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
int i;
areg *= 6;
- if ( mac != NULL ) {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, mac[i] );
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
} else {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, 0 );
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
}
-} /* TLan_SetMac */
+}
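
For readers following the hunk above: a minimal standalone sketch (plain userspace C, not part of the patch) of how a 6-byte MAC lands in consecutive AREG byte offsets. The aregs[] array and sketch_set_mac() helper are illustrative stand-ins for the tlan_dio_write8() calls the real driver makes.

/* Standalone sketch (userspace, not kernel code): how a 6-byte MAC ends
 * up in consecutive AREG byte offsets.  The aregs[] array stands in for
 * the tlan_dio_write8() calls of the real driver. */
#include <stdio.h>

static unsigned char aregs[4 * 6];	/* AREG_0..AREG_3, 6 bytes each */

static void sketch_set_mac(int areg, const unsigned char *mac)
{
	int i;

	areg *= 6;
	for (i = 0; i < 6; i++)
		aregs[areg + i] = mac ? mac[i] : 0;	/* NULL clears the AREG */
}

int main(void)
{
	static const unsigned char mac[6] = { 0x00, 0x80, 0x5f, 0x12, 0x34, 0x56 };
	int i;

	sketch_set_mac(0, mac);
	for (i = 0; i < 6; i++)
		printf("%02x%c", aregs[i], i == 5 ? '\n' : ':');
	return 0;
}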
@@ -2432,205 +2522,202 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
******************************************************************************
*****************************************************************************/
- /*********************************************************************
- * TLan_PhyPrint
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the
- * TLAN device having the PHYs to be detailed.
- *
- * This function prints the registers a PHY (aka transceiver).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
- phy = priv->phy[priv->phyNum];
-
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
- } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
- printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3\n" );
- for ( i = 0; i < 0x20; i+= 4 ) {
- printk( "TLAN: 0x%02x", i );
- TLan_MiiReadReg( dev, phy, i, &data0 );
- printk( " 0x%04hx", data0 );
- TLan_MiiReadReg( dev, phy, i + 1, &data1 );
- printk( " 0x%04hx", data1 );
- TLan_MiiReadReg( dev, phy, i + 2, &data2 );
- printk( " 0x%04hx", data2 );
- TLan_MiiReadReg( dev, phy, i + 3, &data3 );
- printk( " 0x%04hx\n", data3 );
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name);
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy);
+ pr_info("TLAN: Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ pr_info("TLAN: 0x%02x", i);
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ printk(" 0x%04hx", data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ printk(" 0x%04hx", data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ printk(" 0x%04hx", data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ printk(" 0x%04hx\n", data3);
}
} else {
- printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name);
}
-} /* TLan_PhyPrint */
+}
- /*********************************************************************
- * TLan_PhyDetect
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the adapter
- * for which the PHY needs determined.
- *
- * So far I've found that adapters which have external PHYs
- * may also use the internal PHY for part of the functionality.
- * (eg, AUI/Thinnet). This function finds out if this TLAN
- * chip has an internal PHY, and then finds the first external
- * PHY (starting from address 0) if it exists).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (e.g., AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if it exists.
+ *
+ ********************************************************************/
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 control;
u16 hi;
u16 lo;
u32 phy;
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- priv->phyNum = 0xFFFF;
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
return;
}
- TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
- if ( hi != 0xFFFF ) {
+ if (hi != 0xffff)
priv->phy[0] = TLAN_PHY_MAX_ADDR;
- } else {
+ else
priv->phy[0] = TLAN_PHY_NONE;
- }
priv->phy[1] = TLAN_PHY_NONE;
- for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
- if ( ( control != 0xFFFF ) ||
- ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
- TLAN_DBG( TLAN_DEBUG_GNRL,
- "PHY found at %02x %04x %04x %04x\n",
- phy, control, hi, lo );
- if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
- ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
priv->phy[1] = phy;
}
}
}
- if ( priv->phy[1] != TLAN_PHY_NONE ) {
- priv->phyNum = 1;
- } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
- priv->phyNum = 0;
- } else {
- printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
- }
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ pr_info("TLAN: Cannot initialize device, no PHY was found!\n");
-} /* TLan_PhyDetect */
+}
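
As an aside, the presence test in the detection loop above reduces to "any probed register reads back something other than the all-ones value a floating MII bus returns". A small standalone sketch with made-up register values; phy_present() is a hypothetical helper, not part of the driver.

/* Standalone sketch (userspace, not kernel code) of the presence test:
 * a PHY address counts as populated when any probed register reads back
 * something other than the floating-bus value 0xffff. */
#include <stdbool.h>
#include <stdio.h>

static bool phy_present(unsigned short control, unsigned short id_hi,
			unsigned short id_lo)
{
	return control != 0xffff || id_hi != 0xffff || id_lo != 0xffff;
}

int main(void)
{
	/* Made-up register values, for illustration only. */
	printf("empty address : %d\n", phy_present(0xffff, 0xffff, 0xffff));
	printf("populated one : %d\n", phy_present(0x3100, 0x2000, 0x5c01));
	return 0;
}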
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- if ( ( priv->phyNum == 0 ) &&
- ( priv->phy[1] != TLAN_PHY_NONE ) &&
- ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
}
/* Wait for 50 ms and power up.
* This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
-} /* TLan_PhyPowerDown */
+}
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK;
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
/* Wait for 500 ms and reset the
* transceiver. The TLAN docs say both 50 ms and
* 500 ms, so do the longer, just in case.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
-} /* TLan_PhyPowerUp */
+}
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 value;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK | MII_GC_RESET;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- while ( value & MII_GC_RESET ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
/* Wait for 500 ms and initialize.
* I don't remember why I wait this long.
* I've changed this to 50ms, as it seems long enough.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
-} /* TLan_PhyReset */
+}
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 ability;
u16 control;
u16 data;
@@ -2638,86 +2725,88 @@ static void TLan_PhyStartLink( struct net_device *dev )
u16 status;
u16 tctl;
- phy = priv->phy[priv->phyNum];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
- if ( ( status & MII_GS_AUTONEG ) &&
- ( ! priv->aui ) ) {
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
ability = status >> 11;
- if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
- } else if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
} else {
/* Set Auto-Neg advertisement */
- TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
/* Enable Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
/* Restart Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
/* Wait for 4 sec for autonegotiation
- * to complete. The max spec time is less than this
- * but the card need additional time to start AN.
- * .5 sec should be plenty extra.
- */
- printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ * to complete. The max spec time is less than this
+ * but the card needs additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ pr_info("TLAN: %s: Starting autonegotiation.\n",
+ dev->name);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
}
- if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
- } else if ( priv->phyNum == 0 ) {
+ } else if (priv->phy_num == 0) {
control = 0;
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
- if ( priv->aui ) {
- tctl |= TLAN_TC_AUISEL;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
} else {
- tctl &= ~TLAN_TC_AUISEL;
- if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = true;
+ priv->tlan_full_duplex = true;
}
- if ( priv->speed == TLAN_SPEED_100 ) {
+ if (priv->speed == TLAN_SPEED_100)
control |= MII_GC_SPEEDSEL;
- }
}
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
}
/* Wait for 2 sec to give the transceiver time
* to establish link.
*/
- TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyStartLink */
+}
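
The forced-mode control values written above (0x0000, 0x0100, 0x2000, 0x2100) are just combinations of the MII speed-select and duplex bits. A standalone sketch that reproduces those constants; the 0x2000 bit matches MII_GC_SPEEDSEL in tlan.h, and treating 0x0100 as the duplex bit is inferred from the values the driver writes, so take it as an assumption.

/* Standalone sketch (userspace, not kernel code) reproducing the forced
 * MII control values written above: bit 0x2000 selects 100 Mbps, bit
 * 0x0100 selects full duplex. */
#include <stdio.h>

int main(void)
{
	static const int speeds[] = { 10, 100 };
	int s, d;

	for (s = 0; s < 2; s++)
		for (d = 0; d < 2; d++) {	/* d: 0 = half, 1 = full */
			unsigned int ctl = 0;

			if (speeds[s] == 100)
				ctl |= 0x2000;	/* speed select (MII_GC_SPEEDSEL) */
			if (d)
				ctl |= 0x0100;	/* full duplex */
			printf("%3d Mbps, %s duplex -> MII_GEN_CTL = 0x%04x\n",
			       speeds[s], d ? "full" : "half", ctl);
		}
	return 0;
}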
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 an_adv;
u16 an_lpa;
u16 data;
@@ -2725,115 +2814,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
u16 phy;
u16 status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
- if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ if (!(status & MII_GS_AUTOCMPLT)) {
/* Wait for 8 sec to give the process
* more time. Perhaps we should fail after a while.
*/
- if (!priv->neg_be_verbose++) {
- pr_info("TLAN: Giving autonegotiation more time.\n");
- pr_info("TLAN: Please check that your adapter has\n");
- pr_info("TLAN: been properly connected to a HUB or Switch.\n");
- pr_info("TLAN: Trying to establish link in the background...\n");
- }
- TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ if (!priv->neg_be_verbose++) {
+ pr_info("TLAN: Giving autonegotiation more time.\n");
+ pr_info("TLAN: Please check that your adapter has\n");
+ pr_info("TLAN: been properly connected to a HUB or Switch.\n");
+ pr_info("TLAN: Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
- printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
mode = an_adv & an_lpa & 0x03E0;
- if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = true;
- } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = true;
- }
-
- if ( ( ! ( mode & 0x0180 ) ) &&
- ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
- ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
}
- if ( priv->phyNum == 0 ) {
- if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
- ( an_adv & an_lpa & 0x0040 ) ) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
- MII_GC_AUTOENB | MII_GC_DUPLEX );
- pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n");
} else {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
- pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n");
}
}
/* Wait for 100 ms. No reason in particular.
*/
- TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyFinishAutoNeg */
+}
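
The duplex decision above operates on the AND of the local advertisement and the link-partner ability word, masked to the technology bits (0x03e0). A standalone sketch with made-up register values; the bit meanings (0x0100 = 100BASE-TX FD, 0x0080 = 100BASE-TX HD, 0x0040 = 10BASE-T FD) follow the standard autonegotiation layout and are an assumption here, not something stated in the patch.

/* Standalone sketch (userspace, not kernel code) of the duplex decision
 * above.  mode is the AND of local and partner abilities, masked to the
 * technology bits; the example values are made up. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned short an_adv = 0x01e1;		/* example: everything advertised */
	unsigned short an_lpa = 0x0041;		/* example: partner offers 10FD   */
	unsigned short mode = an_adv & an_lpa & 0x03e0;
	bool full_duplex = false;

	if (mode & 0x0100)				/* 100 Mbps full duplex agreed */
		full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))	/* only 10 Mbps FD left */
		full_duplex = true;

	printf("mode = 0x%04x -> %s duplex\n", (unsigned int)mode,
	       full_duplex ? "full" : "half");
	return 0;
}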
#ifdef MONITOR
- /*********************************************************************
- *
- * TLan_phyMonitor
- *
- * Returns:
- * None
- *
- * Params:
- * dev The device structure of this device.
- *
- *
- * This function monitors PHY condition by reading the status
- * register via the MII bus. This can be used to give info
- * about link changes (up/down), and possible switch to alternate
- * media.
- *
- * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 phy_status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- /* Get PHY status register */
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
- /* Check if link has been lost */
- if (!(phy_status & MII_GS_LINK)) {
- if (priv->link) {
- priv->link = 0;
- printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
- netif_carrier_off(dev);
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
- return;
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
}
}
- /* Link restablished? */
- if ((phy_status & MII_GS_LINK) && !priv->link) {
- priv->link = 1;
- printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+ /* Link re-established? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
netif_carrier_on(dev);
- }
+ }
/* Setup a new monitor */
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}
#endif /* MONITOR */
@@ -2842,47 +2934,48 @@ void TLan_PhyMonitor( struct net_device *dev )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
- These routines are based on the information in Chap. 2 of the
- "ThunderLAN Programmer's Guide", pp. 15-24.
+These routines are based on the information in Chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_MiiReadReg
- *
- * Returns:
- * false if ack received ok
- * true if no ack received or other error
- *
- * Parms:
- * dev The device structure containing
- * The io address and interrupt count
- * for this device.
- * phy The address of the PHY to be queried.
- * reg The register whose contents are to be
- * retrieved.
- * val A pointer to a variable to store the
- * retrieved value.
- *
- * This function uses the TLAN's MII bus to retrieve the contents
- * of a given register on a PHY. It sends the appropriate info
- * and then reads the 16-bit register value from the MII bus via
- * the TLAN SIO register.
- *
- **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
- u32 i;
+ u32 i;
bool err;
int minten;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
err = false;
@@ -2892,48 +2985,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
- nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
- if (nack) { /* No ACK, so fake it */
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
for (i = 0; i < 16; i++) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
tmp |= i;
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
*val = tmp;
@@ -2942,116 +3035,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
return err;
-} /* TLan_MiiReadReg */
+}
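
The bit-banged sequence above clocks out a standard MII management read frame: start (01b), read opcode (10b), a 5-bit PHY address and a 5-bit register number, then a turnaround before 16 data bits are sampled. A standalone sketch of just the 14 header bits, using the driver's internal PHY address (0x1f) and MII_GEN_ID_HI (0x02) as the example; mii_read_header() is a hypothetical helper, not part of the driver.

/* Standalone sketch (userspace, not kernel code) of the 14 header bits
 * the routine above clocks out, MSB first: start 01b, read opcode 10b,
 * 5-bit PHY address, 5-bit register number. */
#include <stdio.h>

static unsigned int mii_read_header(unsigned int phy, unsigned int reg)
{
	unsigned int frame = 0;

	frame = (frame << 2) | 0x1;		/* start:  01b        */
	frame = (frame << 2) | 0x2;		/* opcode: 10b (read) */
	frame = (frame << 5) | (phy & 0x1f);	/* PHY address        */
	frame = (frame << 5) | (reg & 0x1f);	/* register number    */
	return frame;				/* 14 significant bits */
}

int main(void)
{
	unsigned int hdr = mii_read_header(0x1f, 0x02);	/* internal PHY, ID high */
	int bit;

	for (bit = 13; bit >= 0; bit--)
		putchar((hdr >> bit) & 1 ? '1' : '0');
	putchar('\n');
	return 0;
}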
- /***************************************************************
- * TLan_MiiSendData
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- * dev The address of the PHY to be queried.
- * data The value to be placed on the MII bus.
- * num_bits The number of bits in data that are to
- * be placed on the MII bus.
- *
- * This function sends on sequence of bits on the MII
- * configuration bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * dev The address of the PHY to be queried.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends one sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
u16 sio;
u32 i;
- if ( num_bits == 0 )
+ if (num_bits == 0)
return;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
- for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
- if ( data & i )
- TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSendData */
+}
- /***************************************************************
- * TLan_MiiSync
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- *
- * This functions syncs all PHYs in terms of the MII configuration
- * bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
{
int i;
u16 sio;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
- for ( i = 0; i < 32; i++ ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSync */
+}
- /***************************************************************
- * TLan_MiiWriteReg
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure for the device
- * to write to.
- * phy The address of the PHY to be written to.
- * reg The register whose contents are to be
- * written.
- * val The value to be written to the register.
- *
- * This function uses the TLAN's MII bus to write the contents of a
- * given register on a PHY. It sends the appropriate info and then
- * writes the 16-bit register value from the MII configuration bus
- * via the TLAN SIO register.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value to the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
unsigned long flags = 0;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3153,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync( dev->base_addr );
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
- TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
-} /* TLan_MiiWriteReg */
+}
@@ -3090,229 +3184,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Eeprom routines
+ThunderLAN driver eeprom routines
- The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
- EEPROM. These functions are based on information in Microchip's
- data sheet. I don't know how well this functions will work with
- other EEPROMs.
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM. These functions are based on information in Microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_EeSendStart
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- *
- * This function sends a start cycle to an EEPROM attached
- * to a TLAN chip.
- *
- **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
{
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
- /***************************************************************
- * TLan_EeSendByte
- *
- * Returns:
- * If the correct ack was received, 0, otherwise 1
- * Parms: io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data The 8 bits of information to
- * send to the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is sent after the ack is
- * read.
- *
- * This function sends a byte on the serial EEPROM line,
- * driving the clock to send each bit. The function then
- * reverses transmission direction and reads an acknowledge
- * bit.
- *
- **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is sent after the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
int err;
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
/* Assume clock is low, tx is enabled; */
- for ( place = 0x80; place != 0; place >>= 1 ) {
- if ( place & data )
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
- if ( ( ! err ) && stop ) {
+ if ((!err) && stop) {
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
return err;
-} /* TLan_EeSendByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReceiveByte
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data An address to a char to hold the
- * data sent from the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is received, and no ack is
- * sent.
- *
- * This function receives 8 bits of data from the EEPROM
- * over the serial link. It then sends and ack bit, or no
- * ack and a stop bit. This function is used to retrieve
- * data after the address of a byte in the EEPROM has been
- * sent.
- *
- **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends and ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
*data = 0;
/* Assume clock is low, tx is enabled; */
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- for ( place = 0x80; place; place >>= 1 ) {
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
*data |= place;
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- if ( ! stop ) {
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
} else {
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReadByte
- *
- * Returns:
- * No error = 0, else, the stage at which the error
- * occurred.
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * ee_addr The address of the byte in the
- * EEPROM whose contents are to be
- * retrieved.
- * data An address to a char to hold the
- * data obtained from the EEPROM.
- *
- * This function reads a byte of information from an byte
- * cell in the EEPROM.
- *
- **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
int err;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- int ret=0;
+ int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=1;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
goto fail;
}
- err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=2;
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
goto fail;
}
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=3;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
goto fail;
}
- TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
-} /* TLan_EeReadByte */
+}
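
Taken together, the routine above performs the 24C02 "random read" sequence: start, device-select byte 0xA0 for a write, the byte address, a repeated start, device-select byte 0xA1 for a read, one data byte, then stop. A standalone sketch that only prints that sequence; the example byte address is arbitrary and not taken from the driver.

/* Standalone sketch (userspace, not kernel code) that just prints the
 * 24C02 random-read sequence performed above; the byte address is an
 * arbitrary example. */
#include <stdio.h>

int main(void)
{
	unsigned int ee_addr = 0x10;	/* arbitrary example byte address */

	printf("START\n");
	printf("send 0xa0   (device select, write)\n");
	printf("send 0x%02x   (byte address)\n", ee_addr);
	printf("repeated START\n");
	printf("send 0xa1   (device select, read)\n");
	printf("read one data byte, no ACK\n");
	printf("STOP\n");
	return 0;
}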
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e2..5fc98a8e4889 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
********************************************************************/
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/netdevice.h>
@@ -40,8 +40,11 @@
#define TLAN_IGNORE 0
#define TLAN_RECORD 1
-#define TLAN_DBG(lvl, format, args...) \
- do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
#define TLAN_DEBUG_GNRL 0x0001
#define TLAN_DEBUG_TX 0x0002
@@ -50,7 +53,8 @@
#define TLAN_DEBUG_PROBE 0x0010
#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
/*****************************************************************
@@ -70,13 +74,13 @@
#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
#endif
-typedef struct tlan_adapter_entry {
- u16 vendorId;
- u16 deviceId;
- char *deviceLabel;
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
u32 flags;
- u16 addrOfs;
-} TLanAdapterEntry;
+ u16 addr_ofs;
+};
#define TLAN_ADAPTER_NONE 0x00000000
#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
#define TLAN_CSTAT_DP_PR 0x0100
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
u32 count;
u32 address;
-} TLanBufferRef;
+};
-typedef struct tlan_list_tag {
+struct tlan_list {
u32 forward;
- u16 cStat;
- u16 frameSize;
- TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
*
****************************************************************/
-typedef struct tlan_private_tag {
- struct net_device *nextDevice;
- struct pci_dev *pciDev;
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
struct net_device *dev;
- void *dmaStorage;
- dma_addr_t dmaStorageDMA;
- unsigned int dmaSize;
- u8 *padBuffer;
- TLanList *rxList;
- dma_addr_t rxListDMA;
- u8 *rxBuffer;
- dma_addr_t rxBufferDMA;
- u32 rxHead;
- u32 rxTail;
- u32 rxEocCount;
- TLanList *txList;
- dma_addr_t txListDMA;
- u8 *txBuffer;
- dma_addr_t txBufferDMA;
- u32 txHead;
- u32 txInProgress;
- u32 txTail;
- u32 txBusyCount;
- u32 phyOnline;
- u32 timerSetAt;
- u32 timerType;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
struct timer_list timer;
struct board *adapter;
- u32 adapterRev;
+ u32 adapter_rev;
u32 aui;
u32 debug;
u32 duplex;
u32 phy[2];
- u32 phyNum;
+ u32 phy_num;
u32 speed;
- u8 tlanRev;
- u8 tlanFullDuplex;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
spinlock_t lock;
u8 link;
u8 is_eisa;
struct work_struct tlan_tqueue;
u8 neg_be_verbose;
-} TLanPrivateInfo;
+};
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
****************************************************************/
#define TLAN_HOST_CMD 0x00
-#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_GO 0x80000000
#define TLAN_HC_STOP 0x40000000
#define TLAN_HC_ACK 0x20000000
#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_CMD_TRFRAM 0x02
#define TLAN_NET_CMD_TXPACE 0x01
#define TLAN_NET_SIO 0x01
-#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_MINTEN 0x80
#define TLAN_NET_SIO_ECLOK 0x40
#define TLAN_NET_SIO_ETXEN 0x20
#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_MASK_MASK4 0x10
#define TLAN_NET_MASK_RSRVD 0x0F
#define TLAN_NET_CONFIG 0x04
-#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_RCLK 0x8000
#define TLAN_NET_CFG_TCLK 0x4000
#define TLAN_NET_CFG_BIT 0x2000
#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
/* Generic MII/PHY Registers */
#define MII_GEN_CTL 0x00
-#define MII_GC_RESET 0x8000
+#define MII_GC_RESET 0x8000
#define MII_GC_LOOPBK 0x4000
#define MII_GC_SPEEDSEL 0x2000
#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
#define MII_GS_EXTCAP 0x0001
#define MII_GEN_ID_HI 0x02
#define MII_GEN_ID_LO 0x03
-#define MII_GIL_OUI 0xFC00
-#define MII_GIL_MODEL 0x03F0
-#define MII_GIL_REVISION 0x000F
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
#define MII_AN_ADV 0x04
#define MII_AN_LPA 0x05
#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
#define TLAN_TLPHY_ID 0x10
#define TLAN_TLPHY_CTL 0x11
-#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_IGLINK 0x8000
#define TLAN_TC_SWAPOL 0x4000
#define TLAN_TC_AUISEL 0x2000
#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
#define LEVEL1_ID1 0x7810
#define LEVEL1_ID2 0x0000
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
/* Routines to access internal registers. */
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
-} /* TLan_DioRead8 */
+}
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
-} /* TLan_DioRead16 */
+}
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inl(base_addr + TLAN_DIO_DATA);
-} /* TLan_DioRead32 */
+}
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
}
-#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
/*
* given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
*
* The original code was:
*
- * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) { return ((a && !b) || (!a && b)); }
*
- * #define XOR8( a, b, c, d, e, f, g, h ) \
- * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)))))))
+ * #define DA(a, bit) (((u8) a[bit/8]) & ((u8) (1 << bit%8)))
*
- * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- * DA(a,30), DA(a,36), DA(a,42) );
- * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- * DA(a,31), DA(a,37), DA(a,43) ) << 1;
- * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- * DA(a,32), DA(a,38), DA(a,44) ) << 2;
- * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- * DA(a,33), DA(a,39), DA(a,45) ) << 3;
- * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- * DA(a,34), DA(a,40), DA(a,46) ) << 4;
- * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- * DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
*
*/
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
{
- u8 hash;
+ u8 hash;
- hash = (a[0]^a[3]); /* & 077 */
- hash ^= ((a[0]^a[3])>>6); /* & 003 */
- hash ^= ((a[1]^a[4])<<2); /* & 074 */
- hash ^= ((a[1]^a[4])>>4); /* & 017 */
- hash ^= ((a[2]^a[5])<<4); /* & 060 */
- hash ^= ((a[2]^a[5])>>2); /* & 077 */
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
- return hash & 077;
+ return hash & 077;
}
#endif
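
The tlan_hash_func() cleanup above keeps the compact byte-folding form of the multicast hash, while the comment block keeps the original bit-by-bit XOR8/DA formulation. As a purely illustrative cross-check (not part of the patch; a standalone userspace sketch with hypothetical names), both formulations can be computed side by side on a sample multicast address and compared:

#include <stdio.h>
#include <stdint.h>

/* folded form, mirrors tlan_hash_func() above */
static uint8_t hash_folded(const uint8_t *a)
{
	uint8_t hash;

	hash  = a[0] ^ a[3];			/* & 077 */
	hash ^= (a[0] ^ a[3]) >> 6;		/* & 003 */
	hash ^= (a[1] ^ a[4]) << 2;		/* & 074 */
	hash ^= (a[1] ^ a[4]) >> 4;		/* & 017 */
	hash ^= (a[2] ^ a[5]) << 4;		/* & 060 */
	hash ^= (a[2] ^ a[5]) >> 2;		/* & 077 */
	return hash & 077;
}

/* bit-by-bit form: hash bit i is the XOR of address bits i, i+6, ... i+42 */
static uint8_t hash_bitwise(const uint8_t *a)
{
	uint8_t hash = 0;
	int i, bit;

	for (i = 0; i < 6; i++) {
		uint8_t x = 0;

		for (bit = i; bit < 48; bit += 6)
			x ^= (a[bit / 8] >> (bit % 8)) & 1;
		hash |= x << i;
	}
	return hash;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };

	/* both print the same 6-bit bucket (octal, matching the & 077 comments) */
	printf("folded=%02o bitwise=%02o\n", hash_folded(mac), hash_bitwise(mac));
	return 0;
}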
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d7..55786a0efc41 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
{
- unsigned int old_features, features;
+ u32 old_features, features;
old_features = dev->features;
/* Unset features, set them as we chew on the arg. */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e7..7fa5ec2de942 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#include <generated/utsrelease.h>
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d776c4a8d3c1..04e8ce14a1d0 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "30-Nov-2010"
+#define DRIVER_VERSION "17-Jan-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -868,15 +868,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
restart = 1;
- } else
+ } else {
restart = 0;
+ }
spin_unlock(&ctx->mtx);
- if (restart)
+ if (restart) {
+ spin_lock(&ctx->mtx);
cdc_ncm_tx_timeout_start(ctx);
- else if (ctx->netdev != NULL)
+ spin_unlock(&ctx->mtx);
+ } else if (ctx->netdev != NULL) {
usbnet_start_xmit(NULL, ctx->netdev);
+ }
}
static struct sk_buff *
@@ -900,7 +904,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
if (ctx->tx_curr_skb != NULL)
need_timer = 1;
- spin_unlock(&ctx->mtx);
/* Start timer, if there is a remaining skb */
if (need_timer)
@@ -908,6 +911,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
if (skb_out)
dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+ spin_unlock(&ctx->mtx);
return skb_out;
error:
@@ -1020,8 +1025,8 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
if (((offset + temp) > actlen) ||
(temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
pr_debug("invalid frame detected (ignored)"
- "offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, temp, skb_in);
+ "offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, temp, skb_in);
if (!x)
goto error;
break;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
err("Firmware too big: %zu", fw->size);
+ release_firmware(fw);
return -ENOSPC;
}
data_len = fw->size;
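
The one-line kaweth change above plugs a firmware leak: once request_firmware() has succeeded, every early return must be paired with release_firmware(). A minimal sketch of that pattern follows, with hypothetical function and buffer names (only request_firmware()/release_firmware() are the real kernel API):

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_FW_BUF_SIZE 4096	/* stand-in for the driver's buffer limit */

static int example_load_firmware(struct device *dev, const char *fw_name,
				 u8 *buf)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fw_name, dev);
	if (ret)
		return ret;

	if (fw->size > EXAMPLE_FW_BUF_SIZE) {
		release_firmware(fw);	/* the release the fix above adds */
		return -ENOSPC;
	}

	memcpy(buf, fw->data, fw->size);
	release_firmware(fw);		/* the normal path frees it too */
	return 0;
}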
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3ff..105d7f0630cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
- if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
- if (strchr(dev->name, '%')) {
- err = dev_alloc_name(dev, dev->name);
- if (err < 0)
- goto err_alloc_name;
- }
-
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
-err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd7..0d6fec6b7d93 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
static int velocity_set_wol(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
+ enum speed_opt spd_dpx = vptr->options.spd_dpx;
static u8 buf[256];
int i;
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
writew(0x0FFF, &regs->WOLSRClr);
+ if (spd_dpx == SPD_DPX_1000_FULL)
+ goto mac_done;
+
+ if (spd_dpx != SPD_DPX_AUTO)
+ goto advertise_done;
+
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_SPEED_1000)
MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+advertise_done:
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
{
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
writeb(GCR, &regs->CHIPGCR);
}
+mac_done:
BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
/* Turn on SWPTAG just before entering power mode */
BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff61..d7227539484e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
#define MAC_REG_CHIPGSR 0x9C
#define MAC_REG_TESTCFG 0x9D
#define MAC_REG_DEBUG 0x9E
-#define MAC_REG_CHIPGCR 0x9F
+#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
#define MAC_REG_WOLCR0_SET 0xA0
#define MAC_REG_WOLCR1_SET 0xA1
#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
* Bits in CHIPGCR register
*/
-#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */
-#define CHIPGCR_FCFDX 0x40
+#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX 0x40 /* force full duplex */
#define CHIPGCR_FCRESV 0x20
-#define CHIPGCR_FCMODE 0x10
+#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
#define CHIPGCR_LPSOPT 0x08
#define CHIPGCR_TM1US 0x04
#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5b..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
static int enable_mq = 1;
static int irq_share_mode;
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
/*
* Enable/Disable the given intr
*/
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
int i;
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
+ spin_unlock(&adapter->cmd_lock);
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb_transport_header(skb))->doff * 4;
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
- unsigned int pull_size;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)
skb_network_header(skb);
- if (iph->protocol == IPPROTO_TCP) {
- pull_size = ctx->eth_ip_hdr_size +
- sizeof(struct tcphdr);
-
- if (unlikely(!pskb_may_pull(skb,
- pull_size))) {
- goto err;
- }
+ if (iph->protocol == IPPROTO_TCP)
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
- } else if (iph->protocol == IPPROTO_UDP) {
+ else if (iph->protocol == IPPROTO_UDP)
+ /*
+ * Use tcp header size so that bytes to
+ * be copied are more than required by
+ * the device.
+ */
ctx->l4_hdr_size =
- sizeof(struct udphdr);
- } else {
+ sizeof(struct tcphdr);
+ else
ctx->l4_hdr_size = 0;
- }
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_DriverShared *shared = adapter->shared;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
if (grp) {
/* add vlan rx stripping. */
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
int i;
- struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = grp;
- /* update FEATURES to device */
- devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
/*
* Clear entire vfTable; then enable untagged pkts.
* Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
vfTable[i] = 0;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else {
printk(KERN_ERR "%s: vlan_rx_register when device has "
"no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
*/
vfTable[i] = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
- /* update FEATURES to device */
- devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
}
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1985,6 +1990,7 @@ static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->mfTablePA = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
kfree(new_table);
}
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
- if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
- adapter->vlan_grp) {
+ if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- }
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
+ vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
/* the rest are already zeroed */
}
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
int err, i;
u32 ret;
+ unsigned long flags;
dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
adapter->shared_pa));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -2263,12 +2277,15 @@ int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
int i;
+ unsigned long flags;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
ring0_size = adapter->rx_queue[0].rx_ring[0].size;
ring0_size = (ring0_size + sz - 1) / sz * sz;
- ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+ ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
break;
} else {
/* If fails to enable required number of MSI-x vectors
- * try enabling 3 of them. One each for rx, tx and event
+ * try enabling the minimum number of vectors required.
*/
vectors = vector_threshold;
printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
u32 cfg;
/* intr settings */
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock(&adapter->cmd_lock);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
*/
if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
- || adapter->num_rx_queues != 2) {
+ || adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
printk(KERN_ERR "Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
+ spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
+ unsigned long flags;
int i = 0;
if (!netif_running(netdev))
return 0;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
+
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
static int
vmxnet3_resume(struct device *device)
{
- int err;
+ int err, i = 0;
+ unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
pci_enable_wake(pdev, PCI_D0, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_request_irqs(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
if (adapter->rxcsum != val) {
adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
/* description, offset */
- { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
- { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
- { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
- { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
- { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
- { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
- { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
- { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
- { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
- { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+ { "Tx Queue#", 0 },
+ { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+ { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+ { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+ { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+ { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+ { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+ { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+ { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+ { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+ { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
/* description, offset */
- {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
- drop_total) },
- { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
- drop_too_many_frags) },
- { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- drop_oversized_hdr) },
- { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
- drop_hdr_inspect_err) },
- { " tso", offsetof(struct vmxnet3_tq_driver_stats,
- drop_tso) },
- { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
- tx_ring_full) },
- { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
- linearized) },
- { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
- copy_skb_header) },
- { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- oversized_hdr) },
+ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_total) },
+ { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_too_many_frags) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_oversized_hdr) },
+ { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_hdr_inspect_err) },
+ { " tso", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_tso) },
+ { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
+ tx_ring_full) },
+ { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+ linearized) },
+ { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+ copy_skb_header) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ oversized_hdr) },
};
/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
- { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
- { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
- { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
- { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
- { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
- { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
- { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
- { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
- { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
- { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+ { "Rx Queue#", 0 },
+ { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+ { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+ { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+ { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+ { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+ { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+ { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+ { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+ { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+ { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
};
/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
/* description, offset */
- { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
- drop_total) },
- { " err", offsetof(struct vmxnet3_rq_driver_stats,
- drop_err) },
- { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
- drop_fcs) },
- { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
- rx_buf_alloc_failure) },
+ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_total) },
+ { " err", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_err) },
+ { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_fcs) },
+ { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+ rx_buf_alloc_failure) },
};
/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
- { "tx timeout count", offsetof(struct vmxnet3_adapter,
+ { "tx timeout count", offsetof(struct vmxnet3_adapter,
tx_timeout_count) }
};
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
+ unsigned long flags;
int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
memset(net_stats, 0, sizeof(*net_stats));
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
- ARRAY_SIZE(vmxnet3_tq_driver_stats) +
- ARRAY_SIZE(vmxnet3_rq_dev_stats) +
- ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+ return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+ adapter->num_tx_queues +
+ (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+ adapter->num_rx_queues +
ARRAY_SIZE(vmxnet3_global_stats);
default:
return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
}
+/* Should be multiple of 4 */
+#define NUM_TX_REGS 8
+#define NUM_RX_REGS 12
+
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
- return 20 * sizeof(u32);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+ adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
}
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (stringset == ETH_SS_STATS) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
- memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
- memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
- memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+ int i, j;
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
- memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
+
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
memcpy(buf, vmxnet3_global_stats[i].desc,
ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+ unsigned long flags;
if (data & ~ETH_FLAG_LRO)
return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
return 0;
}
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
u8 *base;
int i;
int j = 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
/* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
-
- base = (u8 *)&adapter->tqd_start[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
- base = (u8 *)&adapter->tx_queue[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
- base = (u8 *)&adapter->rqd_start[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->tqd_start[j].stats;
+ *buf++ = (u64)j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->tx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_driver_stats[i].offset);
+ }
- base = (u8 *)&adapter->rx_queue[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->rqd_start[j].stats;
+ *buf++ = (u64) j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->rx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_driver_stats[i].offset);
+ }
base = (u8 *)adapter;
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
- int i = 0;
+ int i = 0, j = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
- buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
- buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
- buf[2] = adapter->tx_queue[i].tx_ring.gen;
- buf[3] = 0;
-
- buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
- buf[5] = adapter->tx_queue[i].comp_ring.gen;
- buf[6] = adapter->tx_queue[i].stopped;
- buf[7] = 0;
-
- buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
- buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
- buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
- buf[11] = 0;
-
- buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
- buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
- buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
- buf[15] = 0;
-
- buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
- buf[17] = adapter->rx_queue[i].comp_ring.gen;
- buf[18] = 0;
- buf[19] = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+ buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+ buf[j++] = adapter->tx_queue[i].stopped;
+ buf[j++] = 0;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+ buf[j++] = 0;
+ buf[j++] = 0;
+ }
+
}
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
const struct ethtool_rxfh_indir *p)
{
unsigned int i;
+ unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p->ring_index[i];
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RSSIDT);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f03..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
struct vlan_group *vlan_grp;
struct vmxnet3_intr intr;
+ spinlock_t cmd_lock;
struct Vmxnet3_DriverShared *shared;
struct Vmxnet3_PMConf *pm_conf;
struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
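
A recurring theme in the vmxnet3 hunks above is the new adapter->cmd_lock: every write of a command to VMXNET3_REG_CMD, and any paired read of its result, now happens under the spinlock so concurrent callers cannot interleave command/result pairs. The driver open-codes this at each call site; as a sketch only (the helper name is hypothetical), the pattern is:

static u32 example_issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	/* serialize the command write and its result read */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return ret;
}

Call sites that only issue a command ignore the result read; the _irqsave variant mirrors what the hunks use, so the lock is safe to take from any of the contexts involved.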
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..77097e383cf4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
VXGE_HW_FW_API_GET_EPROM_REV,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
ring->rxd_init = attr->rxd_init;
ring->rxd_term = attr->rxd_term;
ring->buffer_mode = config->buffer_mode;
+ ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+ ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
ring->rxds_limit = config->rxds_limit;
ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+ fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+ fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
if (fifo->config->intr)
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg3_saved = val64;
}
if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg3_saved = val64;
}
val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
return status;
}
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
- config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d160..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
u32 vsport_number;
u32 max_kdfc_db;
u32 max_nofl_db;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
struct __vxge_hw_ring *____cacheline_aligned ringh;
struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
u32 doorbell_cnt;
u32 total_db_cnt;
u64 rxds_limit;
+ u32 rtimer;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
u32 per_txdl_space;
u32 vp_id;
u32 tx_intr_num;
+ u32 rtimer;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c683..e40f619b62b1 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
struct vxge_hw_ring_rxd_info ext_info;
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
- ring->pkts_processed = 0;
-
- vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
return ret;
}
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+ int i = 0;
+
+ /* Enable CI for RTI */
+ if (vdev->config.intr_type == MSI_X) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_ring *hw_ring;
+
+ hw_ring = vdev->vpaths[i].ring.handle;
+ vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+ }
+ }
+
+ /* Enable CI for TTI */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+ vxge_hw_vpath_tti_ci_set(hw_fifo);
+ /*
+	 * For INTA (with or without NAPI), set CI on only one
+	 * vpath (there is only one free-running timer).
+ */
+ if ((vdev->config.intr_type == INTA) && (i == 0))
+ break;
+ }
+
+ return;
+}
+
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_wake_all_queues(vdev->ndev);
}
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
*/
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
- struct vxge_ring *ring =
- container_of(napi, struct vxge_ring, napi);
+ struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+ int pkts_processed;
int budget_org = budget;
- ring->budget = budget;
+ ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
if (ring->pkts_processed < budget_org) {
napi_complete(napi);
+
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
+ mmiowb();
}
- return ring->pkts_processed;
+	/* Return the local copy because, once the MSI-X vector is unmasked
+	 * above, the interrupt can fire immediately and preempt this NAPI
+	 * thread, updating ring->pkts_processed. */
+ return pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed += ring->pkts_processed;
budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
netdev_get_tx_queue(vdev->ndev, 0);
vpath->fifo.indicate_max_pkts =
vdev->config.fifo_indicate_max_pkts;
+ vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
return VXGE_HW_OK;
}
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function adjusts the restriction timer value
+ * depending on the traffic load
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+ fifo->interrupt_count++;
+ if (jiffies > fifo->jiffies + HZ / 100) {
+ struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+ fifo->jiffies = jiffies;
+ if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+ hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+ hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ } else if (hw_fifo->rtimer != 0) {
+ hw_fifo->rtimer = 0;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ }
+ fifo->interrupt_count = 0;
+ }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function raises or resets the restriction timer,
+ * depending on traffic utilization, when the interrupt rate for this
+ * ring falls outside a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+ ring->interrupt_count++;
+ if (jiffies > ring->jiffies + HZ / 100) {
+ struct __vxge_hw_ring *hw_ring = ring->handle;
+
+ ring->jiffies = jiffies;
+ if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+ hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+ hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ } else if (hw_ring->rtimer != 0) {
+ hw_ring->rtimer = 0;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ }
+ ring->interrupt_count = 0;
+ }
+}
+
/*
* vxge_isr_napi
* @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
#ifdef CONFIG_PCI_MSI
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
+ adaptive_coalesce_tx_interrupts(fifo);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
VXGE_COMPLETE_VPATH_TX(fifo);
+ vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ mmiowb();
+
return IRQ_HANDLED;
}
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
struct vxge_ring *ring = (struct vxge_ring *)dev_id;
- /* MSIX_IDX for Rx is 1 */
+ adaptive_coalesce_rx_interrupts(ring);
+
vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
+ ring->rx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
napi_schedule(&ring->napi);
return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
+		/* Reduce the chance of losing alarm interrupts by masking
+		 * the vector. A pending bit will be set if an alarm is
+		 * generated, and the interrupt will fire on unmask.
+ */
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+ vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+ mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
-
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
+ msix_id);
+ mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
vpath->ring.rx_vector_no = (vpath->device_id *
VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+ vpath->fifo.tx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE);
+
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
VXGE_ALARM_MSIX_ID);
}
@@ -2474,8 +2592,9 @@ INTA_MODE:
"%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
- vxge_hw_vpath_tti_ci_set(vdev->devh,
- vdev->vpaths[0].device_id);
+
+ vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
ret = request_irq((int) vdev->pdev->irq,
vxge_isr_napi,
IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
}
netif_tx_start_all_queues(vdev->ndev);
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
goto out0;
out2:
@@ -3348,7 +3471,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(VXGE_ERR,
"%s: vpath memory allocation failed",
vdev->ndev->name);
- ret = -ENODEV;
+ ret = -ENOMEM;
goto _out1;
}
@@ -3369,11 +3492,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
if (vdev->config.gro_enable)
ndev->features |= NETIF_F_GRO;
- if (register_netdev(ndev)) {
+ ret = register_netdev(ndev);
+ if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s: %s : device registration failed!",
ndev->name, __func__);
- ret = -ENODEV;
goto _out2;
}
@@ -3444,6 +3567,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
+ kfree(vdev->vpaths);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
+
vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3927,7 @@ static void __devinit vxge_device_config_init(
break;
case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+ device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
break;
}
@@ -4335,10 +4463,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit1;
}
- if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+ ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+ if (ret) {
vxge_debug_init(VXGE_ERR,
"%s : request regions failed", __func__);
- ret = -ENODEV;
goto _exit1;
}
@@ -4446,7 +4574,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (!img[i].is_valid)
break;
vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+ "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
VXGE_EPROM_IMG_MAJOR(img[i].version),
VXGE_EPROM_IMG_MINOR(img[i].version),
VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4771,9 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_disable_sriov(pdev);
+ pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
+ pci_disable_sriov(pdev);
_exit3:
iounmap(attr.bar0);
_exit2:
@@ -4655,7 +4784,7 @@ _exit0:
kfree(ll_config);
kfree(device_config);
driver_config->config_dev_cnt--;
- pci_set_drvdata(pdev, NULL);
+ driver_config->total_dev_cnt--;
return ret;
}
@@ -4668,45 +4797,34 @@ _exit0:
static void __devexit vxge_remove(struct pci_dev *pdev)
{
struct __vxge_hw_device *hldev;
- struct vxgedev *vdev = NULL;
- struct net_device *dev;
- int i = 0;
+ struct vxgedev *vdev;
+ int i;
hldev = pci_get_drvdata(pdev);
-
if (hldev == NULL)
return;
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
+ vdev = netdev_priv(hldev->ndev);
vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
__func__);
- vxge_device_unregister(hldev);
- for (i = 0; i < vdev->no_of_vpath; i++) {
+ for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
- vdev->vpaths[i].mcast_addr_cnt = 0;
- vdev->vpaths[i].mac_addr_cnt = 0;
- }
-
- kfree(vdev->vpaths);
+ vxge_device_unregister(hldev);
+ pci_set_drvdata(pdev, NULL);
+ /* Do not call pci_disable_sriov here, as it will break child devices */
+ vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
-
- /* we are safe to free it now */
- free_netdev(dev);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
__func__, __LINE__);
-
- vxge_hw_device_terminate(hldev);
-
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
- pci_set_drvdata(pdev, NULL);
vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
__LINE__);
}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356f..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
#define VXGE_TTI_LTIMER_VAL 1000
#define VXGE_T1A_TTI_LTIMER_VAL 80
#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_TTI_RTIMER_ADAPT_VAL 10
#define VXGE_T1A_TTI_RTIMER_VAL 400
#define VXGE_RTI_BTIMER_VAL 250
#define VXGE_RTI_LTIMER_VAL 100
#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL 15
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
#define RTI_T1A_RX_UFC_C 50
#define RTI_T1A_RX_UFC_D 60
+/*
+ * The interrupt rate is maintained at 3k per second with the moderation
+ * parameters for most traffic but not all. This is the maximum interrupt
+ * count allowed per function with INTA or per vector in the case of
+ * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
/* Milli secs timer period */
#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
int tx_steering_type;
int indicate_max_pkts;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ u32 tx_vector_no;
/* Tx stats */
struct vxge_fifo_stats stats;
} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
*/
int driver_id;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
/* copy of the flag indicating whether rx_csum is to be used */
u32 rx_csum:1,
rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
int vlan_tag_strip;
struct vlan_group *vlgrp;
- int rx_vector_no;
+ u32 rx_vector_no;
enum vxge_hw_status last_status;
/* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075f..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
return status;
}
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+ u64 val64;
+
+ if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+ return;
+
+ vp_reg = fifo->vp_reg;
+ config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+ if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ fifo->tim_tti_cfg1_saved = val64;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg1_saved;
+
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ ring->tim_rti_cfg1_saved = val64;
+ writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+ u64 val64 = fifo->tim_tti_cfg3_saved;
+ u64 timer = (fifo->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+ writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ /* tti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg3_saved;
+ u64 timer = (ring->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+ writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ /* rti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
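As a quick check of the conversion both helpers use: if rtimer is expressed in microseconds, which the *1000/272 scaling implies (a hardware timer tick of roughly 272 ns), the adaptive value added to vxge-main.h maps to hardware units as follows (illustrative values only):

	u64 rtimer = 15;			/* VXGE_RTI_RTIMER_ADAPT_VAL, in usec */
	u64 units  = (rtimer * 1000) / 272;	/* 15000 / 272 == 55 timer units */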
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
}
/**
+ * vxge_hw_channel_msix_clear - Clear MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id, re-arming
+ * the vector when the channel is configured in MSI-X one-shot mode.
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
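A hedged sketch of where a clear like this typically sits in a one-shot MSI-X flow. With one-shot vectors the device auto-masks a vector when it fires, so the driver must clear (re-arm) it after servicing the channel. The handler below is hypothetical (the vector number is an assumption); the real handlers live in vxge-main.c:

	static irqreturn_t example_msix_oneshot_handle(int irq, void *dev_id)
	{
		struct __vxge_hw_channel *channel = dev_id;
		int vector_no = 1;	/* assumed vector bound to this channel */

		/* ... drain completed descriptors from the channel here ... */

		/* Re-arm the one-shot vector so the next event can raise an IRQ */
		vxge_hw_channel_msix_clear(channel, vector_no);

		return IRQ_HANDLED;
	}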
+/**
* vxge_hw_device_set_intr_type - Updates the configuration
* with new interrupt type.
* @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
if (vpath->hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+ 0, 32), &vp_reg->one_shot_vect0_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
0, 32), &vp_reg->one_shot_vect1_en);
- }
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
0, 32), &vp_reg->one_shot_vect2_en);
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
- 0, 32), &vp_reg->one_shot_vect3_en);
}
}
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+ else
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa1..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
* Virtual Paths
*/
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
u32 vxge_hw_vpath_id(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2245,6 +2249,8 @@ void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
@@ -2270,6 +2276,9 @@ void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf3..581e21525e85 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "1"
-#define VXGE_VERSION_BUILD "22082"
+#define VXGE_VERSION_FIX "2"
+#define VXGE_VERSION_BUILD "22259"
#define VXGE_VERSION_FOR "k"
#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+ /* Statistics */
+ int rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
return cons;
}
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /*
+ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ struct netfront_info *np = netdev_priv(dev);
+ np->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = (struct tcphdr *)th;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_TCP, 0);
+ }
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = (struct udphdr *)th;
+ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_UDP, 0);
+ }
break;
default:
if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_setup(skb)) {
- kfree_skb(skb);
- packets_dropped++;
- dev->stats.rx_errors++;
- continue;
- }
+ if (checksum_setup(dev, skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ dev->stats.rx_errors++;
+ continue;
}
dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
}
}
+static const struct xennet_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} xennet_stats[] = {
+ {
+ "rx_gso_checksum_fixup",
+ offsetof(struct netfront_info, rx_gso_checksum_fixup)
+ },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xennet_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ void *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ xennet_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
static const struct ethtool_ops xennet_ethtool_ops =
{
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = xennet_set_sg,
.set_tso = xennet_set_tso,
.get_link = ethtool_op_get_link,
+
+ .get_sset_count = xennet_get_sset_count,
+ .get_ethtool_stats = xennet_get_ethtool_stats,
+ .get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda70981b7a6..dc29348264c6 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,7 +31,7 @@ source "drivers/pci/pcie/aer/Kconfig"
# PCI Express ASPM
#
config PCIEASPM
- bool "PCI Express ASPM control" if EMBEDDED
+ bool "PCI Express ASPM control" if EXPERT
depends on PCI && PCIEPORTBUS
default y
help
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index de886f3dfd39..6e318ce41136 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,7 @@ comment "PC-card bridges"
config YENTA
tristate "CardBus yenta-compatible bridge support"
depends on PCI
- select CARDBUS if !EMBEDDED
+ select CARDBUS if !EXPERT
select PCCARD_NONSTATIC if PCMCIA != n
---help---
This option enables support for CardBus host bridges. Virtually
@@ -84,27 +84,27 @@ config YENTA
config YENTA_O2
default y
- bool "Special initialization for O2Micro bridges" if EMBEDDED
+ bool "Special initialization for O2Micro bridges" if EXPERT
depends on YENTA
config YENTA_RICOH
default y
- bool "Special initialization for Ricoh bridges" if EMBEDDED
+ bool "Special initialization for Ricoh bridges" if EXPERT
depends on YENTA
config YENTA_TI
default y
- bool "Special initialization for TI and EnE bridges" if EMBEDDED
+ bool "Special initialization for TI and EnE bridges" if EXPERT
depends on YENTA
config YENTA_ENE_TUNE
default y
- bool "Auto-tune EnE bridges for CB cards" if EMBEDDED
+ bool "Auto-tune EnE bridges for CB cards" if EXPERT
depends on YENTA_TI && CARDBUS
config YENTA_TOSHIBA
default y
- bool "Special initialization for Toshiba ToPIC bridges" if EMBEDDED
+ bool "Special initialization for Toshiba ToPIC bridges" if EXPERT
depends on YENTA
config PD6729
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 467e82bd0929..a50391b6ba2a 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -943,6 +943,8 @@ static int rio_enum_complete(struct rio_mport *port)
* @port: Master port to send transactions
* @destid: Current destination ID in network
* @hopcount: Number of hops into the network
+ * @prev: previous rio_dev
+ * @prev_port: previous port number
*
* Recursively discovers a RIO network. Transactions are sent via the
* master port passed in @port.
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4941cade319f..cdd97192dc69 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,18 +97,6 @@ config RTC_INTF_DEV
If unsure, say Y.
-config RTC_INTF_DEV_UIE_EMUL
- bool "RTC UIE emulation on dev interface"
- depends on RTC_INTF_DEV
- help
- Provides an emulation for RTC_UIE if the underlying rtc chip
- driver does not expose RTC_UIE ioctls. Those requests generate
- once-per-second update interrupts, used for synchronization.
-
- The emulation code will read the time from the hardware
- clock several times per second, please enable this option
- only if you know that you really need it.
-
config RTC_DRV_TEST
tristate "Test driver/device"
help
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 90384b9f6b2c..925006d33109 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -16,6 +16,9 @@
#include <linux/log2.h>
#include <linux/workqueue.h>
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
int err;
@@ -120,12 +123,18 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
- alarm->enabled = rtc->aie_timer.enabled;
- if (alarm->enabled)
+ if (rtc->ops == NULL)
+ err = -ENODEV;
+ else if (!rtc->ops->read_alarm)
+ err = -EINVAL;
+ else {
+ memset(alarm, 0, sizeof(struct rtc_wkalrm));
+ alarm->enabled = rtc->aie_timer.enabled;
alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
+ }
mutex_unlock(&rtc->ops_lock);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
@@ -175,16 +184,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
if (rtc->aie_timer.enabled) {
rtc_timer_remove(rtc, &rtc->aie_timer);
- rtc->aie_timer.enabled = 0;
}
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
rtc->aie_timer.period = ktime_set(0, 0);
if (alarm->enabled) {
- rtc->aie_timer.enabled = 1;
- rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
}
mutex_unlock(&rtc->ops_lock);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
@@ -195,15 +202,15 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
return err;
if (rtc->aie_timer.enabled != enabled) {
- if (enabled) {
- rtc->aie_timer.enabled = 1;
- rtc_timer_enqueue(rtc, &rtc->aie_timer);
- } else {
+ if (enabled)
+ err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ else
rtc_timer_remove(rtc, &rtc->aie_timer);
- rtc->aie_timer.enabled = 0;
- }
}
+ if (err)
+ return err;
+
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->alarm_irq_enable)
@@ -235,12 +242,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
now = rtc_tm_to_ktime(tm);
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
rtc->uie_rtctimer.period = ktime_set(1, 0);
- rtc->uie_rtctimer.enabled = 1;
- rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
- } else {
+ err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+ } else
rtc_timer_remove(rtc, &rtc->uie_rtctimer);
- rtc->uie_rtctimer.enabled = 0;
- }
out:
mutex_unlock(&rtc->ops_lock);
@@ -488,10 +492,13 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
 * Enqueues a timer onto the rtc device's timerqueue and sets
* the next alarm event appropriately.
*
+ * Sets the enabled bit on the added timer.
+ *
* Must hold ops_lock for proper serialization of timerqueue
*/
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
+ timer->enabled = 1;
timerqueue_add(&rtc->timerqueue, &timer->node);
if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
struct rtc_wkalrm alarm;
@@ -501,7 +508,13 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
schedule_work(&rtc->irqwork);
+ else if (err) {
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+ timer->enabled = 0;
+ return err;
+ }
}
+ return 0;
}
/**
@@ -512,13 +525,15 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
 * Removes a timer from the rtc device's timerqueue and sets
* the next alarm event appropriately.
*
+ * Clears the enabled bit on the removed timer.
+ *
* Must hold ops_lock for proper serialization of timerqueue
*/
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
timerqueue_del(&rtc->timerqueue, &timer->node);
-
+ timer->enabled = 0;
if (next == &timer->node) {
struct rtc_wkalrm alarm;
int err;
@@ -626,8 +641,7 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
timer->node.expires = expires;
timer->period = period;
- timer->enabled = 1;
- rtc_timer_enqueue(rtc, timer);
+ ret = rtc_timer_enqueue(rtc, timer);
mutex_unlock(&rtc->ops_lock);
return ret;
@@ -645,7 +659,6 @@ int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
mutex_lock(&rtc->ops_lock);
if (timer->enabled)
rtc_timer_remove(rtc, timer);
- timer->enabled = 0;
mutex_unlock(&rtc->ops_lock);
return ret;
}
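Because rtc_timer_enqueue() can now fail, the error also propagates out of rtc_timer_start(). A caller sketch, not part of this patch: the function name is hypothetical and 't' is assumed to be an already-initialized struct rtc_timer in a driver that already includes <linux/rtc.h>:

	static int example_arm_periodic(struct rtc_device *rtc, struct rtc_timer *t)
	{
		ktime_t expires = ktime_add(ktime_get_real(), ktime_set(5, 0));
		int err;

		/* 1 Hz periodic timer; enqueue failures are no longer silent */
		err = rtc_timer_start(rtc, t, expires, ktime_set(1, 0));
		if (err)
			pr_warn("rtc: could not program timer: %d\n", err);

		return err;
	}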
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b664781..2ac8f6aff5a4 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -831,12 +831,14 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int qeth_l2_open(struct net_device *dev)
+static int __qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
+ if (card->state == CARD_STATE_UP)
+ return rc;
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
@@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev)
return rc;
}
+static int qeth_l2_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 5, "qethope_");
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_CARD_TEXT(card, 3, "openREC");
+ return -ERESTARTSYS;
+ }
+ return __qeth_l2_open(dev);
+}
+
static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@ contin:
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
- qeth_l2_open(card->dev);
+ __qeth_l2_open(card->dev);
} else {
rtnl_lock();
dev_open(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e465bfc4..d09b0c44fc3d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card,
*/
if (iph->protocol == IPPROTO_UDP)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+ QETH_HDR_EXT_CSUM_HDR_REQ;
+ iph->check = 0;
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
}
@@ -3240,12 +3242,14 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int qeth_l3_open(struct net_device *dev)
+static int __qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
+ if (card->state == CARD_STATE_UP)
+ return rc;
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev)
return rc;
}
+static int qeth_l3_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 5, "qethope_");
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_CARD_TEXT(card, 3, "openREC");
+ return -ERESTARTSYS;
+ }
+ return __qeth_l3_open(dev);
+}
+
static int qeth_l3_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@ contin:
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode)
- qeth_l3_open(card->dev);
+ __qeth_l3_open(card->dev);
else {
rtnl_lock();
dev_open(card->dev);
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2d8cc455dbc7..42cdaa9a4d8a 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -82,7 +82,7 @@ config SSB_SDIOHOST
config SSB_SILENT
bool "No SSB kernel messages"
- depends on SSB && EMBEDDED
+ depends on SSB && EXPERT
help
This option turns off all Sonics Silicon Backplane printks.
Note that you won't be able to identify problems, once
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/lirc/TODO.lirc_zilog
index 6aa312df4018..2d0263f07937 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/lirc/TODO.lirc_zilog
@@ -1,13 +1,37 @@
-The binding between hdpvr and lirc_zilog is currently disabled,
+1. Both ir-kbd-i2c and lirc_zilog provide support for RX events.
+The 'tx_only' lirc_zilog module parameter allows ir-kbd-i2c and lirc_zilog
+to coexist in the kernel, if the user requires such a set-up.
+However, the IR unit will not work well without coordination between the
+two modules. A shared mutex, for transceiver access locking, needs to be
+supplied by bridge drivers, in struct IR_i2c_init_data, to both ir-kbd-i2c
+and lirc_zilog, before they will coexist usefully (see the sketch after
+this list). This should be fixed before moving out of staging.
+
+2. References and locking need careful examination. For cx18 and ivtv PCI
+cards, which are not easily "hot unplugged", the imperfect state of reference
+counting and locking is acceptable, if not correct. For USB-connected units
+like the HD PVR, PVR USB2, HVR-1900, and HVR-1950, the likelihood of an Oops
+on unplug is probably high. Proper reference counting and locking need to be
+implemented before this module is moved out of staging.
+
+3. The binding between hdpvr and lirc_zilog is currently disabled,
due to an OOPS reported a few years ago when both the hdpvr and cx18
drivers were loaded on the same system. More details can be seen at:
http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html
More tests need to be done, in order to fix the reported issue.
-There's a conflict between ir-kbd-i2c: Both provide support for RX events.
-Such conflict needs to be fixed, before moving it out of staging.
+4. In addition to providing a shared mutex for transceiver access
+locking, bridge drivers, if able, should provide a chip reset() callback
+to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
+to perform Z8 chip resets via GPIO manipulations. This will allow lirc_zilog
+to bring the chip back to normal when it hangs, in the same places the
+original lirc_pvr150 driver code does. This is not strictly needed, so it
+is not required to move lirc_zilog out of staging.
+
+5. Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
+and installed on Hauppauge products. When working on either module, developers
+must consider at least the following bridge drivers which mention an IR Rx unit
+at address 0x71 (indicative of a Z8):
-The way I2C probe works, it will try to register the driver twice, one
-for RX and another for TX. The logic needs to be fixed to avoid such
-issue.
+ ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
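A hedged sketch for item 1 above. None of the names below exist in the tree; they only illustrate a bridge driver handing one mutex to both i2c IR client drivers so transceiver access can be serialized:

	#include <linux/mutex.h>
	#include <linux/i2c.h>

	struct ir_shared_init_data {		/* stand-in for IR_i2c_init_data */
		struct mutex *transceiver_lock;	/* owned by the bridge driver */
	};

	/* Bridge driver side: publish one lock per IR transceiver. */
	static DEFINE_MUTEX(bridge_ir_lock);	/* illustrative name only */

	static void bridge_fill_ir_init_data(struct ir_shared_init_data *init)
	{
		init->transceiver_lock = &bridge_ir_lock;
	}

	/* Client side (ir-kbd-i2c or lirc_zilog): bracket every chip access. */
	static int ir_client_poll(struct i2c_client *c, struct mutex *shared)
	{
		char cmd = 0;
		int ret;

		mutex_lock(shared);
		ret = i2c_master_send(c, &cmd, 1);
		mutex_unlock(shared);

		return ret == 1 ? 0 : -EIO;
	}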
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 0da6b9518af9..235cab0eb087 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -447,6 +447,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
exit:
mutex_unlock(&context->ctx_lock);
+ kfree(data_buf);
return (!retval) ? n_bytes : retval;
}
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 929ae5795467..5938616f3e8f 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -232,6 +232,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
i++;
}
terminate_send(tx_buf[i - 1]);
+ kfree(tx_buf);
return n;
}
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index dfd2c447e67d..3a9c09881b2b 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -376,6 +376,7 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
unsigned long flags;
int counttimer;
int *wbuf;
+ ssize_t ret;
if (!is_claimed)
return -EBUSY;
@@ -393,8 +394,10 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
if (timer == 0) {
/* try again if device is ready */
timer = init_lirc_timer();
- if (timer == 0)
- return -EIO;
+ if (timer == 0) {
+ ret = -EIO;
+ goto out;
+ }
}
/* adjust values from usecs */
@@ -420,7 +423,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
if (check_pselecd && (in(1) & LP_PSELECD)) {
lirc_off();
local_irq_restore(flags);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
} while (counttimer < wbuf[i]);
i++;
@@ -436,7 +440,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
level = newlevel;
if (check_pselecd && (in(1) & LP_PSELECD)) {
local_irq_restore(flags);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
} while (counttimer < wbuf[i]);
i++;
@@ -445,7 +450,11 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
#else
/* place code that handles write without external timer here */
#endif
- return n;
+ ret = n;
+out:
+ kfree(wbuf);
+
+ return ret;
}
static unsigned int lirc_poll(struct file *file, poll_table *wait)
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 998485ebdbce..925eabe14854 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -448,6 +448,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
exit:
mutex_unlock(&context->ctx_lock);
+ kfree(data_buf);
return (!retval) ? n_bytes : retval;
}
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 9bcf149c4260..1c3099b388e0 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -966,7 +966,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
if (n % sizeof(int) || count % 2 == 0)
return -EINVAL;
wbuf = memdup_user(buf, n);
- if (PTR_ERR(wbuf))
+ if (IS_ERR(wbuf))
return PTR_ERR(wbuf);
spin_lock_irqsave(&hardware[type].lock, flags);
if (type == LIRC_IRDEO) {
@@ -981,6 +981,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
}
off();
spin_unlock_irqrestore(&hardware[type].lock, flags);
+ kfree(wbuf);
return n;
}
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index c553ab626238..76be7b8c6209 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -330,6 +330,7 @@ static ssize_t lirc_write(struct file *file, const char *buf, size_t n,
/* enable receiver */
Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE;
#endif
+ kfree(tx_buf);
return count;
}
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index ad29bb1275ab..3fe5f4160194 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -20,6 +20,9 @@
*
* parts are cut&pasted from the lirc_i2c.c driver
*
+ * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
+ * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -60,38 +63,44 @@
#include <media/lirc_dev.h>
#include <media/lirc.h>
-struct IR {
- struct lirc_driver l;
-
- /* Device info */
- struct mutex ir_lock;
- int open;
- bool is_hdpvr;
-
+struct IR_rx {
/* RX device */
- struct i2c_client c_rx;
- int have_rx;
+ struct i2c_client *c;
/* RX device buffer & lock */
struct lirc_buffer buf;
struct mutex buf_lock;
/* RX polling thread data */
- struct completion *t_notify;
- struct completion *t_notify2;
- int shutdown;
struct task_struct *task;
/* RX read data */
unsigned char b[3];
+ bool hdpvr_data_fmt;
+};
+struct IR_tx {
/* TX device */
- struct i2c_client c_tx;
+ struct i2c_client *c;
+
+ /* TX additional actions needed */
int need_boot;
- int have_tx;
+ bool post_tx_ready_poll;
+};
+
+struct IR {
+ struct lirc_driver l;
+
+ struct mutex ir_lock;
+ int open;
+
+ struct i2c_adapter *adapter;
+ struct IR_rx *rx;
+ struct IR_tx *tx;
};
/* Minor -> data mapping */
+static struct mutex ir_devices_lock;
static struct IR *ir_devices[MAX_IRCTL_DEVICES];
/* Block size for IR transmitter */
@@ -124,14 +133,11 @@ static struct mutex tx_data_lock;
#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
## args)
#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
-#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
+#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static int debug; /* debug output */
-static int disable_rx; /* disable RX device */
-static int disable_tx; /* disable TX device */
+static int tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
@@ -150,8 +156,12 @@ static int add_to_buf(struct IR *ir)
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
+ struct IR_rx *rx = ir->rx;
- if (lirc_buffer_full(&ir->buf)) {
+ if (rx == NULL)
+ return -ENXIO;
+
+ if (lirc_buffer_full(&rx->buf)) {
dprintk("buffer overflow\n");
return -EOVERFLOW;
}
@@ -161,17 +171,25 @@ static int add_to_buf(struct IR *ir)
* data and we have space
*/
do {
+ if (kthread_should_stop())
+ return -ENODATA;
+
/*
* Lock i2c bus for the duration. RX/TX chips interfere so
* this is worth it
*/
mutex_lock(&ir->ir_lock);
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
+
/*
* Send random "poll command" (?) Windows driver does this
* and it is a good point to detect chip failure.
*/
- ret = i2c_master_send(&ir->c_rx, sendbuf, 1);
+ ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
if (failures >= 3) {
@@ -186,45 +204,53 @@ static int add_to_buf(struct IR *ir)
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
schedule_timeout((100 * HZ + 999) / 1000);
- ir->need_boot = 1;
+ ir->tx->need_boot = 1;
++failures;
mutex_unlock(&ir->ir_lock);
continue;
}
- ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf));
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
+ ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
zilog_error("i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
- ir->b[0] = keybuf[3];
- ir->b[1] = keybuf[4];
- ir->b[2] = keybuf[5];
- dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]);
+ rx->b[0] = keybuf[3];
+ rx->b[1] = keybuf[4];
+ rx->b[2] = keybuf[5];
+ dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
}
/* key pressed ? */
- if (ir->is_hdpvr) {
+ if (rx->hdpvr_data_fmt) {
if (got_data && (keybuf[0] == 0x80))
return 0;
else if (got_data && (keybuf[0] == 0x00))
return -ENODATA;
- } else if ((ir->b[0] & 0x80) == 0)
+ } else if ((rx->b[0] & 0x80) == 0)
return got_data ? 0 : -ENODATA;
/* look what we have */
- code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2);
+ code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
codes[0] = (code >> 8) & 0xff;
codes[1] = code & 0xff;
/* return it */
- lirc_buffer_write(&ir->buf, codes);
+ lirc_buffer_write(&rx->buf, codes);
++got_data;
- } while (!lirc_buffer_full(&ir->buf));
+ } while (!lirc_buffer_full(&rx->buf));
return 0;
}
@@ -242,46 +268,35 @@ static int add_to_buf(struct IR *ir)
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
-
- if (ir->t_notify != NULL)
- complete(ir->t_notify);
+ struct IR_rx *rx = ir->rx;
dprintk("poll thread started\n");
- do {
- if (ir->open) {
- set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- /*
- * This is ~113*2 + 24 + jitter (2*repeat gap +
- * code length). We use this interval as the chip
- * resets every time you poll it (bad!). This is
- * therefore just sufficient to catch all of the
- * button presses. It makes the remote much more
- * responsive. You can see the difference by
- * running irw and holding down a button. With
- * 100ms, the old polling interval, you'll notice
- * breaks in the repeat sequence corresponding to
- * lost keypresses.
- */
- schedule_timeout((260 * HZ) / 1000);
- if (ir->shutdown)
- break;
- if (!add_to_buf(ir))
- wake_up_interruptible(&ir->buf.wait_poll);
- } else {
- /* if device not opened so we can sleep half a second */
- set_current_state(TASK_INTERRUPTIBLE);
+ /* if device not opened, we can sleep half a second */
+ if (!ir->open) {
schedule_timeout(HZ/2);
+ continue;
}
- } while (!ir->shutdown);
-
- if (ir->t_notify2 != NULL)
- wait_for_completion(ir->t_notify2);
- ir->task = NULL;
- if (ir->t_notify != NULL)
- complete(ir->t_notify);
+ /*
+ * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
+ * We use this interval as the chip resets every time you poll
+ * it (bad!). This is therefore just sufficient to catch all
+ * of the button presses. It makes the remote much more
+ * responsive. You can see the difference by running irw and
+ * holding down a button. With 100ms, the old polling
+ * interval, you'll notice breaks in the repeat sequence
+ * corresponding to lost keypresses.
+ */
+ schedule_timeout((260 * HZ) / 1000);
+ if (kthread_should_stop())
+ break;
+ if (!add_to_buf(ir))
+ wake_up_interruptible(&rx->buf.wait_poll);
+ }
dprintk("poll thread ended\n");
return 0;
@@ -299,10 +314,10 @@ static int set_use_inc(void *data)
* this is completely broken code. lirc_unregister_driver()
* must be possible even when the device is open
*/
- if (ir->c_rx.addr)
- i2c_use_client(&ir->c_rx);
- if (ir->c_tx.addr)
- i2c_use_client(&ir->c_tx);
+ if (ir->rx != NULL)
+ i2c_use_client(ir->rx->c);
+ if (ir->tx != NULL)
+ i2c_use_client(ir->tx->c);
return 0;
}
@@ -311,10 +326,10 @@ static void set_use_dec(void *data)
{
struct IR *ir = data;
- if (ir->c_rx.addr)
- i2c_release_client(&ir->c_rx);
- if (ir->c_tx.addr)
- i2c_release_client(&ir->c_tx);
+ if (ir->rx)
+ i2c_release_client(ir->rx->c);
+ if (ir->tx)
+ i2c_release_client(ir->tx->c);
if (ir->l.owner != NULL)
module_put(ir->l.owner);
}
@@ -453,7 +468,7 @@ corrupt:
}
/* send a block of data to the IR TX device */
-static int send_data_block(struct IR *ir, unsigned char *data_block)
+static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
{
int i, j, ret;
unsigned char buf[5];
@@ -467,7 +482,7 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
buf[1 + j] = data_block[i + j];
dprintk("%02x %02x %02x %02x %02x",
buf[0], buf[1], buf[2], buf[3], buf[4]);
- ret = i2c_master_send(&ir->c_tx, buf, tosend + 1);
+ ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -478,32 +493,32 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
}
/* send boot data to the IR TX device */
-static int send_boot_data(struct IR *ir)
+static int send_boot_data(struct IR_tx *tx)
{
int ret;
unsigned char buf[4];
/* send the boot block */
- ret = send_data_block(ir, tx_data->boot_data);
+ ret = send_data_block(tx, tx_data->boot_data);
if (ret != 0)
return ret;
/* kick it off? */
buf[0] = 0x00;
buf[1] = 0x20;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+ ret = i2c_master_send(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
- ret = i2c_master_recv(&ir->c_tx, buf, 4);
+ ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
@@ -543,7 +558,7 @@ static void fw_unload(void)
}
/* load "firmware" for the IR TX device */
-static int fw_load(struct IR *ir)
+static int fw_load(struct IR_tx *tx)
{
int ret;
unsigned int i;
@@ -558,7 +573,7 @@ static int fw_load(struct IR *ir)
}
/* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev);
+ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &tx->c->dev);
if (ret != 0) {
zilog_error("firmware haup-ir-blaster.bin not available "
"(%d)\n", ret);
@@ -685,20 +700,20 @@ out:
}
/* initialise the IR TX device */
-static int tx_init(struct IR *ir)
+static int tx_init(struct IR_tx *tx)
{
int ret;
/* Load 'firmware' */
- ret = fw_load(ir);
+ ret = fw_load(tx);
if (ret != 0)
return ret;
/* Send boot block */
- ret = send_boot_data(ir);
+ ret = send_boot_data(tx);
if (ret != 0)
return ret;
- ir->need_boot = 0;
+ tx->need_boot = 0;
/* Looks good */
return 0;
@@ -714,20 +729,20 @@ static loff_t lseek(struct file *filep, loff_t offset, int orig)
static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
{
struct IR *ir = filep->private_data;
- unsigned char buf[ir->buf.chunk_size];
+ struct IR_rx *rx = ir->rx;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
dprintk("read called\n");
- if (ir->c_rx.addr == 0)
+ if (rx == NULL)
return -ENODEV;
- if (mutex_lock_interruptible(&ir->buf_lock))
+ if (mutex_lock_interruptible(&rx->buf_lock))
return -ERESTARTSYS;
- if (n % ir->buf.chunk_size) {
+ if (n % rx->buf.chunk_size) {
dprintk("read result = -EINVAL\n");
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
return -EINVAL;
}
@@ -736,7 +751,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
* to avoid losing scan code (in case when queue is awaken somewhere
* between while condition checking and scheduling)
*/
- add_wait_queue(&ir->buf.wait_poll, &wait);
+ add_wait_queue(&rx->buf.wait_poll, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/*
@@ -744,7 +759,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
* mode and 'copy_to_user' is happy, wait for data.
*/
while (written < n && ret == 0) {
- if (lirc_buffer_empty(&ir->buf)) {
+ if (lirc_buffer_empty(&rx->buf)) {
/*
* According to the read(2) man page, 'written' can be
* returned as less than 'n', instead of blocking
@@ -764,16 +779,17 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
schedule();
set_current_state(TASK_INTERRUPTIBLE);
} else {
- lirc_buffer_read(&ir->buf, buf);
+ unsigned char buf[rx->buf.chunk_size];
+ lirc_buffer_read(&rx->buf, buf);
ret = copy_to_user((void *)outbuf+written, buf,
- ir->buf.chunk_size);
- written += ir->buf.chunk_size;
+ rx->buf.chunk_size);
+ written += rx->buf.chunk_size;
}
}
- remove_wait_queue(&ir->buf.wait_poll, &wait);
+ remove_wait_queue(&rx->buf.wait_poll, &wait);
set_current_state(TASK_RUNNING);
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
dprintk("read result = %s (%d)\n",
ret ? "-EFAULT" : "OK", ret);
@@ -782,7 +798,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
}
/* send a keypress to the IR TX device */
-static int send_code(struct IR *ir, unsigned int code, unsigned int key)
+static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
{
unsigned char data_block[TX_BLOCK_SIZE];
unsigned char buf[2];
@@ -799,26 +815,26 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
return ret;
/* Send the data block */
- ret = send_data_block(ir, data_block);
+ ret = send_data_block(tx, data_block);
if (ret != 0)
return ret;
/* Send data block length? */
buf[0] = 0x00;
buf[1] = 0x40;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+ ret = i2c_master_send(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
- ret = i2c_master_recv(&ir->c_tx, buf, 1);
+ ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -832,7 +848,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
/* Send prepare command? */
buf[0] = 0x00;
buf[1] = 0x80;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -843,7 +859,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
* last i2c_master_recv always fails with a -5, so for now, we're
* going to skip this whole mess and say we're done on the HD PVR
*/
- if (ir->is_hdpvr) {
+ if (!tx->post_tx_ready_poll) {
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
@@ -857,7 +873,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
for (i = 0; i < 20; ++i) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((50 * HZ + 999) / 1000);
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+ ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
dprintk("NAK expected: i2c_master_send "
@@ -870,7 +886,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
}
/* Seems to be an 'ok' response */
- i = i2c_master_recv(&ir->c_tx, buf, 1);
+ i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return -EFAULT;
@@ -895,10 +911,11 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
loff_t *ppos)
{
struct IR *ir = filep->private_data;
+ struct IR_tx *tx = ir->tx;
size_t i;
int failures = 0;
- if (ir->c_tx.addr == 0)
+ if (tx == NULL)
return -ENODEV;
/* Validate user parameters */
@@ -919,15 +936,15 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
}
/* Send boot data first if required */
- if (ir->need_boot == 1) {
- ret = send_boot_data(ir);
+ if (tx->need_boot == 1) {
+ ret = send_boot_data(tx);
if (ret == 0)
- ir->need_boot = 0;
+ tx->need_boot = 0;
}
/* Send the code */
if (ret == 0) {
- ret = send_code(ir, (unsigned)command >> 16,
+ ret = send_code(tx, (unsigned)command >> 16,
(unsigned)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
@@ -952,7 +969,7 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((100 * HZ + 999) / 1000);
- ir->need_boot = 1;
+ tx->need_boot = 1;
++failures;
} else
i += sizeof(int);
@@ -969,22 +986,23 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
static unsigned int poll(struct file *filep, poll_table *wait)
{
struct IR *ir = filep->private_data;
+ struct IR_rx *rx = ir->rx;
unsigned int ret;
dprintk("poll called\n");
- if (ir->c_rx.addr == 0)
+ if (rx == NULL)
return -ENODEV;
- mutex_lock(&ir->buf_lock);
+ mutex_lock(&rx->buf_lock);
- poll_wait(filep, &ir->buf.wait_poll, wait);
+ poll_wait(filep, &rx->buf.wait_poll, wait);
dprintk("poll result = %s\n",
- lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM");
+ lirc_buffer_empty(&rx->buf) ? "0" : "POLLIN|POLLRDNORM");
- ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM);
+ ret = lirc_buffer_empty(&rx->buf) ? 0 : (POLLIN|POLLRDNORM);
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
return ret;
}
@@ -994,10 +1012,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int result;
unsigned long mode, features = 0;
- if (ir->c_rx.addr != 0)
+ features |= LIRC_CAN_SEND_PULSE;
+ if (ir->rx != NULL)
features |= LIRC_CAN_REC_LIRCCODE;
- if (ir->c_tx.addr != 0)
- features |= LIRC_CAN_SEND_PULSE;
switch (cmd) {
case LIRC_GET_LENGTH:
@@ -1024,15 +1041,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
result = -EINVAL;
break;
case LIRC_GET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
break;
case LIRC_SET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
result = get_user(mode, (unsigned long *) arg);
if (!result && mode != LIRC_MODE_PULSE)
return -EINVAL;
@@ -1043,6 +1054,15 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
}
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_minor(unsigned int minor)
+{
+ if (minor >= MAX_IRCTL_DEVICES)
+ return NULL;
+
+ return ir_devices[minor];
+}
+
/*
* Open the IR device. Get hold of our IR structure and
* stash it in private_data for the file
@@ -1051,15 +1071,15 @@ static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
int ret;
+ unsigned int minor = MINOR(node->i_rdev);
/* find our IR struct */
- unsigned minor = MINOR(node->i_rdev);
- if (minor >= MAX_IRCTL_DEVICES) {
- dprintk("minor %d: open result = -ENODEV\n",
- minor);
+ mutex_lock(&ir_devices_lock);
+ ir = find_ir_device_by_minor(minor);
+ mutex_unlock(&ir_devices_lock);
+
+ if (ir == NULL)
return -ENODEV;
- }
- ir = ir_devices[minor];
/* increment in use count */
mutex_lock(&ir->ir_lock);
@@ -1106,7 +1126,6 @@ static struct lirc_driver lirc_template = {
static int ir_remove(struct i2c_client *client);
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
#define ID_FLAG_TX 0x01
#define ID_FLAG_HDPVR 0x02
@@ -1126,7 +1145,6 @@ static struct i2c_driver driver = {
},
.probe = ir_probe,
.remove = ir_remove,
- .command = ir_command,
.id_table = ir_transceiver_id,
};
@@ -1144,214 +1162,253 @@ static const struct file_operations lirc_fops = {
.release = close
};
-static int ir_remove(struct i2c_client *client)
+static void destroy_rx_kthread(struct IR_rx *rx)
{
- struct IR *ir = i2c_get_clientdata(client);
+ /* stop the polling thread */
+ if (rx != NULL && !IS_ERR_OR_NULL(rx->task)) {
+ kthread_stop(rx->task);
+ rx->task = NULL;
+ }
+}
- mutex_lock(&ir->ir_lock);
+/* ir_devices_lock must be held */
+static int add_ir_device(struct IR *ir)
+{
+ int i;
- if (ir->have_rx || ir->have_tx) {
- DECLARE_COMPLETION(tn);
- DECLARE_COMPLETION(tn2);
-
- /* end up polling thread */
- if (ir->task && !IS_ERR(ir->task)) {
- ir->t_notify = &tn;
- ir->t_notify2 = &tn2;
- ir->shutdown = 1;
- wake_up_process(ir->task);
- complete(&tn2);
- wait_for_completion(&tn);
- ir->t_notify = NULL;
- ir->t_notify2 = NULL;
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] == NULL) {
+ ir_devices[i] = ir;
+ break;
}
- } else {
- mutex_unlock(&ir->ir_lock);
- zilog_error("%s: detached from something we didn't "
- "attach to\n", __func__);
- return -ENODEV;
+ return i == MAX_IRCTL_DEVICES ? -ENOMEM : i;
+}
+
+/* ir_devices_lock must be held */
+static void del_ir_device(struct IR *ir)
+{
+ int i;
+
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] == ir) {
+ ir_devices[i] = NULL;
+ break;
+ }
+}
+
+static int ir_remove(struct i2c_client *client)
+{
+ struct IR *ir = i2c_get_clientdata(client);
+
+ mutex_lock(&ir_devices_lock);
+
+ if (ir == NULL) {
+ /* We destroyed everything when the first client came through */
+ mutex_unlock(&ir_devices_lock);
+ return 0;
}
- /* unregister lirc driver */
- if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
- lirc_unregister_driver(ir->l.minor);
- ir_devices[ir->l.minor] = NULL;
+ /* Good-bye LIRC */
+ lirc_unregister_driver(ir->l.minor);
+
+ /* Good-bye Rx */
+ destroy_rx_kthread(ir->rx);
+ if (ir->rx != NULL) {
+ if (ir->rx->buf.fifo_initialized)
+ lirc_buffer_free(&ir->rx->buf);
+ i2c_set_clientdata(ir->rx->c, NULL);
+ kfree(ir->rx);
}
- /* free memory */
- lirc_buffer_free(&ir->buf);
- mutex_unlock(&ir->ir_lock);
+ /* Good-bye Tx */
+ i2c_set_clientdata(ir->tx->c, NULL);
+ kfree(ir->tx);
+
+ /* Good-bye IR */
+ del_ir_device(ir);
kfree(ir);
+ mutex_unlock(&ir_devices_lock);
return 0;
}
-static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_adapter(struct i2c_adapter *adapter)
{
+ int i;
struct IR *ir = NULL;
+
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] != NULL &&
+ ir_devices[i]->adapter == adapter) {
+ ir = ir_devices[i];
+ break;
+ }
+
+ return ir;
+}
+
+static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct IR *ir;
struct i2c_adapter *adap = client->adapter;
- char buf;
int ret;
- int have_rx = 0, have_tx = 0;
+ bool tx_probe = false;
- dprintk("%s: adapter name (%s) nr %d, i2c_device_id name (%s), "
- "client addr=0x%02x\n",
- __func__, adap->name, adap->nr, id->name, client->addr);
+ dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+ __func__, id->name, adap->nr, adap->name, client->addr);
/*
- * FIXME - This probe function probes both the Tx and Rx
- * addresses of the IR microcontroller.
- *
- * However, the I2C subsystem is passing along one I2C client at a
- * time, based on matches to the ir_transceiver_id[] table above.
- * The expectation is that each i2c_client address will be probed
- * individually by drivers so the I2C subsystem can mark all client
- * addresses as claimed or not.
- *
- * This probe routine causes only one of the client addresses, TX or RX,
- * to be claimed. This will cause a problem if the I2C subsystem is
- * subsequently triggered to probe unclaimed clients again.
+ * The IR receiver is at i2c address 0x71.
+ * The IR transmitter is at i2c address 0x70.
*/
- /*
- * The external IR receiver is at i2c address 0x71.
- * The IR transmitter is at 0x70.
- */
- client->addr = 0x70;
- if (!disable_tx) {
- if (i2c_master_recv(client, &buf, 1) == 1)
- have_tx = 1;
- dprintk("probe 0x70 @ %s: %s\n",
- adap->name, have_tx ? "success" : "failed");
- }
+ if (id->driver_data & ID_FLAG_TX)
+ tx_probe = true;
+ else if (tx_only) /* module option */
+ return -ENXIO;
- if (!disable_rx) {
- client->addr = 0x71;
- if (i2c_master_recv(client, &buf, 1) == 1)
- have_rx = 1;
- dprintk("probe 0x71 @ %s: %s\n",
- adap->name, have_rx ? "success" : "failed");
- }
+ zilog_info("probing IR %s on %s (i2c-%d)\n",
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
- if (!(have_rx || have_tx)) {
- zilog_error("%s: no devices found\n", adap->name);
- goto out_nodev;
- }
+ mutex_lock(&ir_devices_lock);
- printk(KERN_INFO "lirc_zilog: chip found with %s\n",
- have_rx && have_tx ? "RX and TX" :
- have_rx ? "RX only" : "TX only");
+ /* Use a single struct IR instance for both the Rx and Tx functions */
+ ir = find_ir_device_by_adapter(adap);
+ if (ir == NULL) {
+ ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+ if (ir == NULL) {
+ ret = -ENOMEM;
+ goto out_no_ir;
+ }
+ /* store for use in ir_probe() again, and open() later on */
+ ret = add_ir_device(ir);
+ if (ret)
+ goto out_free_ir;
+
+ ir->adapter = adap;
+ mutex_init(&ir->ir_lock);
+
+ /* set lirc_dev stuff */
+ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l.minor = minor; /* module option */
+ ir->l.code_length = 13;
+ ir->l.rbuf = NULL;
+ ir->l.fops = &lirc_fops;
+ ir->l.data = ir;
+ ir->l.dev = &adap->dev;
+ ir->l.sample_rate = 0;
+ }
- ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+ if (tx_probe) {
+ /* Set up a struct IR_tx instance */
+ ir->tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
+ if (ir->tx == NULL) {
+ ret = -ENOMEM;
+ goto out_free_xx;
+ }
- if (!ir)
- goto out_nomem;
+ ir->tx->c = client;
+ ir->tx->need_boot = 1;
+ ir->tx->post_tx_ready_poll =
+ (id->driver_data & ID_FLAG_HDPVR) ? false : true;
+ } else {
+ /* Set up a struct IR_rx instance */
+ ir->rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
+ if (ir->rx == NULL) {
+ ret = -ENOMEM;
+ goto out_free_xx;
+ }
- ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2);
- if (ret)
- goto out_nomem;
+ ret = lirc_buffer_init(&ir->rx->buf, 2, BUFLEN / 2);
+ if (ret)
+ goto out_free_xx;
- mutex_init(&ir->ir_lock);
- mutex_init(&ir->buf_lock);
- ir->need_boot = 1;
- ir->is_hdpvr = (id->driver_data & ID_FLAG_HDPVR) ? true : false;
+ mutex_init(&ir->rx->buf_lock);
+ ir->rx->c = client;
+ ir->rx->hdpvr_data_fmt =
+ (id->driver_data & ID_FLAG_HDPVR) ? true : false;
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
- ir->l.minor = -1;
+ /* set lirc_dev stuff */
+ ir->l.rbuf = &ir->rx->buf;
+ }
- /* I2C attach to device */
i2c_set_clientdata(client, ir);
- /* initialise RX device */
- if (have_rx) {
- DECLARE_COMPLETION(tn);
- memcpy(&ir->c_rx, client, sizeof(struct i2c_client));
-
- ir->c_rx.addr = 0x71;
- strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME,
- I2C_NAME_SIZE);
+ /* Proceed only if we have the required Tx and Rx clients ready to go */
+ if (ir->tx == NULL ||
+ (ir->rx == NULL && !tx_only)) {
+ zilog_info("probe of IR %s on %s (i2c-%d) done. Waiting on "
+ "IR %s.\n", tx_probe ? "Tx" : "Rx", adap->name,
+ adap->nr, tx_probe ? "Rx" : "Tx");
+ goto out_ok;
+ }
+ /* initialise RX device */
+ if (ir->rx != NULL) {
/* try to fire up polling thread */
- ir->t_notify = &tn;
- ir->task = kthread_run(lirc_thread, ir, "lirc_zilog");
- if (IS_ERR(ir->task)) {
- ret = PTR_ERR(ir->task);
- zilog_error("lirc_register_driver: cannot run "
- "poll thread %d\n", ret);
- goto err;
+ ir->rx->task = kthread_run(lirc_thread, ir,
+ "zilog-rx-i2c-%d", adap->nr);
+ if (IS_ERR(ir->rx->task)) {
+ ret = PTR_ERR(ir->rx->task);
+ zilog_error("%s: could not start IR Rx polling thread"
+ "\n", __func__);
+ goto out_free_xx;
}
- wait_for_completion(&tn);
- ir->t_notify = NULL;
- ir->have_rx = 1;
- }
-
- /* initialise TX device */
- if (have_tx) {
- memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
- ir->c_tx.addr = 0x70;
- strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
- I2C_NAME_SIZE);
- ir->have_tx = 1;
}
- /* set lirc_dev stuff */
- ir->l.code_length = 13;
- ir->l.rbuf = &ir->buf;
- ir->l.fops = &lirc_fops;
- ir->l.data = ir;
- ir->l.minor = minor;
- ir->l.dev = &adap->dev;
- ir->l.sample_rate = 0;
-
/* register with lirc */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- zilog_error("ir_attach: \"minor\" must be between 0 and %d "
- "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor);
+ zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+ __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
- goto err;
+ goto out_free_thread;
}
- /* store this for getting back in open() later on */
- ir_devices[ir->l.minor] = ir;
-
/*
* if we have the tx device, load the 'firmware'. We do this
* after registering with lirc as otherwise hotplug seems to take
* 10s to create the lirc device.
*/
- if (have_tx) {
- /* Special TX init */
- ret = tx_init(ir);
- if (ret != 0)
- goto err;
- }
+ ret = tx_init(ir->tx);
+ if (ret != 0)
+ goto out_unregister;
+ zilog_info("probe of IR %s on %s (i2c-%d) done. IR unit ready.\n",
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+out_ok:
+ mutex_unlock(&ir_devices_lock);
return 0;
-err:
- /* undo everything, hopefully... */
- if (ir->c_rx.addr)
- ir_remove(&ir->c_rx);
- if (ir->c_tx.addr)
- ir_remove(&ir->c_tx);
- return ret;
-
-out_nodev:
- zilog_error("no device found\n");
- return -ENODEV;
-
-out_nomem:
- zilog_error("memory allocation failure\n");
+out_unregister:
+ lirc_unregister_driver(ir->l.minor);
+out_free_thread:
+ destroy_rx_kthread(ir->rx);
+out_free_xx:
+ if (ir->rx != NULL) {
+ if (ir->rx->buf.fifo_initialized)
+ lirc_buffer_free(&ir->rx->buf);
+ if (ir->rx->c != NULL)
+ i2c_set_clientdata(ir->rx->c, NULL);
+ kfree(ir->rx);
+ }
+ if (ir->tx != NULL) {
+ if (ir->tx->c != NULL)
+ i2c_set_clientdata(ir->tx->c, NULL);
+ kfree(ir->tx);
+ }
+out_free_ir:
+ del_ir_device(ir);
kfree(ir);
- return -ENOMEM;
-}
-
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
- /* nothing */
- return 0;
+out_no_ir:
+ zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+ __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
+ ret);
+ mutex_unlock(&ir_devices_lock);
+ return ret;
}
static int __init zilog_init(void)
@@ -1361,6 +1418,7 @@ static int __init zilog_init(void)
zilog_notify("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
+ mutex_init(&ir_devices_lock);
request_module("firmware_class");
@@ -1386,7 +1444,8 @@ module_exit(zilog_exit);
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
- "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver");
+ "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
+ "Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
@@ -1397,8 +1456,5 @@ MODULE_PARM_DESC(minor, "Preferred minor device number");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
-module_param(disable_rx, bool, 0644);
-MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device");
-
-module_param(disable_tx, bool, 0644);
-MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device");
+module_param(tx_only, bool, 0644);
+MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
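The reworked probe path above replaces the old catch-all `err:` teardown with a ladder of labels (`out_unregister`, `out_free_thread`, `out_free_xx`, `out_free_ir`, `out_no_ir`), each undoing exactly the work completed before the point of failure. A minimal user-space sketch of the same pattern, with hypothetical resources standing in for the driver's buffer, polling thread and lirc registration:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's resources (not kernel APIs). */
static int *acquire_buffer(void)  { return malloc(sizeof(int)); }
static int *start_thread(void)    { return malloc(sizeof(int)); }
static int  register_device(void) { return 0; /* pretend success */ }

static int probe(void)
{
	int ret;
	int *buf, *thread;

	buf = acquire_buffer();
	if (buf == NULL) {
		ret = -1;
		goto out_no_buf;
	}

	thread = start_thread();
	if (thread == NULL) {
		ret = -2;
		goto out_free_buf;
	}

	ret = register_device();
	if (ret != 0)
		goto out_stop_thread;

	return 0;	/* success: every resource stays live */

	/* Unwind in strict reverse order of acquisition. */
out_stop_thread:
	free(thread);
out_free_buf:
	free(buf);
out_no_buf:
	fprintf(stderr, "probe failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}
```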
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 8fe017c3721f..eb9b9f1bc138 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1450,29 +1450,55 @@ static struct video_device tm6000_template = {
* ------------------------------------------------------------------
*/
-int tm6000_v4l2_register(struct tm6000_core *dev)
+static struct video_device *vdev_init(struct tm6000_core *dev,
+ const struct video_device
+ *template, const char *type_name)
{
- int ret = -1;
struct video_device *vfd;
vfd = video_device_alloc();
- if(!vfd) {
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->release = video_device_release;
+ vfd->debug = tm6000_debug;
+ vfd->lock = &dev->lock;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
+
+ video_set_drvdata(vfd, dev);
+ return vfd;
+}
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+
+ dev->vfd = vdev_init(dev, &tm6000_template, "video");
+
+ if (!dev->vfd) {
+ printk(KERN_INFO "%s: can't register video device\n",
+ dev->name);
return -ENOMEM;
}
- dev->vfd = vfd;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
- memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
- dev->vfd->debug = tm6000_debug;
- dev->vfd->lock = &dev->lock;
+ ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
- vfd->v4l2_dev = &dev->v4l2_dev;
- video_set_drvdata(vfd, dev);
+ if (ret < 0) {
+ printk(KERN_INFO "%s: can't register video device\n",
+ dev->name);
+ return ret;
+ }
+
+ printk(KERN_INFO "%s: registered device %s\n",
+ dev->name, video_device_node_name(dev->vfd));
- ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
return ret;
}
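The new `vdev_init()` helper above allocates a `video_device`, copies a const template into it, then overrides the per-instance fields (name, lock, release hook, driver data). Roughly the same clone-from-template idiom, sketched in plain user-space C with made-up types:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical device type standing in for struct video_device. */
struct fake_vdev {
	char name[64];
	int  debug;
	void *lock;
};

/* Shared, read-only defaults, like the driver's tm6000_template. */
static const struct fake_vdev template = { .name = "unset", .debug = 0 };

static struct fake_vdev *vdev_init(const struct fake_vdev *tmpl,
				   const char *base, const char *type)
{
	struct fake_vdev *vfd = malloc(sizeof(*vfd));

	if (vfd == NULL)
		return NULL;

	*vfd = *tmpl;	/* struct copy of the template */
	snprintf(vfd->name, sizeof(vfd->name), "%s %s", base, type);
	return vfd;
}

int main(void)
{
	struct fake_vdev *v = vdev_init(&template, "tm6000", "video");

	if (v == NULL)
		return EXIT_FAILURE;
	printf("created device \"%s\"\n", v->name);
	free(v);
	return EXIT_SUCCESS;
}
```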
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c43ef48b1a0f..396277216e4f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_R3964) += n_r3964.o
obj-y += vt/
+obj-$(CONFIG_HVC_DRIVER) += hvc/
+obj-y += serial/
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 000000000000..e6bed5f177ff
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
+obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_TILE) += hvc_tile.o
+obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
+obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
+obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
+obj-$(CONFIG_HVC_XEN) += hvc_xen.o
+obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
+obj-$(CONFIG_HVCS) += hvcs.o
+obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
diff --git a/drivers/char/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
index 5fe4631e2a61..5fe4631e2a61 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/tty/hvc/hvc_beat.c
diff --git a/drivers/char/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e9cba13ee800..e9cba13ee800 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
diff --git a/drivers/char/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 54381eba4e4a..54381eba4e4a 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
diff --git a/drivers/char/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 6470f63deb4b..6470f63deb4b 100644
--- a/drivers/char/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
diff --git a/drivers/char/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index 2623e177e8d6..2623e177e8d6 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
diff --git a/drivers/char/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
index 21c54955084e..21c54955084e 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/tty/hvc/hvc_iseries.c
diff --git a/drivers/char/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index c3425bb3a1f6..c3425bb3a1f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
diff --git a/drivers/char/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 61c4a61558d9..61c4a61558d9 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
diff --git a/drivers/char/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 7a84a0595477..7a84a0595477 100644
--- a/drivers/char/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
diff --git a/drivers/char/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index b0957e61a7be..b0957e61a7be 100644
--- a/drivers/char/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
diff --git a/drivers/char/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 5e2f52b33327..5e2f52b33327 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
diff --git a/drivers/char/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..3740e327f180 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
diff --git a/drivers/char/hvcs.c b/drivers/tty/hvc/hvcs.c
index bedc6c1b6fa5..bedc6c1b6fa5 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
diff --git a/drivers/char/hvsi.c b/drivers/tty/hvc/hvsi.c
index 67a75a502c01..67a75a502c01 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
diff --git a/drivers/char/virtio_console.c b/drivers/tty/hvc/virtio_console.c
index 896a2ced1d27..896a2ced1d27 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/tty/hvc/virtio_console.c
diff --git a/drivers/serial/21285.c b/drivers/tty/serial/21285.c
index d89aa38c5cf0..d89aa38c5cf0 100644
--- a/drivers/serial/21285.c
+++ b/drivers/tty/serial/21285.c
diff --git a/drivers/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..be0ebce36e54 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
diff --git a/drivers/serial/68328serial.h b/drivers/tty/serial/68328serial.h
index 664ceb0a158c..664ceb0a158c 100644
--- a/drivers/serial/68328serial.h
+++ b/drivers/tty/serial/68328serial.h
diff --git a/drivers/serial/68360serial.c b/drivers/tty/serial/68360serial.c
index 88b13356ec10..88b13356ec10 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
diff --git a/drivers/serial/8250.c b/drivers/tty/serial/8250.c
index b25e6e490530..b25e6e490530 100644
--- a/drivers/serial/8250.c
+++ b/drivers/tty/serial/8250.c
diff --git a/drivers/serial/8250.h b/drivers/tty/serial/8250.h
index 6e19ea3e48d5..6e19ea3e48d5 100644
--- a/drivers/serial/8250.h
+++ b/drivers/tty/serial/8250.h
diff --git a/drivers/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
index 9c10262f2469..9c10262f2469 100644
--- a/drivers/serial/8250_accent.c
+++ b/drivers/tty/serial/8250_accent.c
diff --git a/drivers/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
index b0ce8c56f1a4..b0ce8c56f1a4 100644
--- a/drivers/serial/8250_acorn.c
+++ b/drivers/tty/serial/8250_acorn.c
diff --git a/drivers/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
index 3bfe0f7b26fb..3bfe0f7b26fb 100644
--- a/drivers/serial/8250_boca.c
+++ b/drivers/tty/serial/8250_boca.c
diff --git a/drivers/serial/8250_early.c b/drivers/tty/serial/8250_early.c
index eaafb98debed..eaafb98debed 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/tty/serial/8250_early.c
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
index 567143ace159..567143ace159 100644
--- a/drivers/serial/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250_exar_st16c554.c
diff --git a/drivers/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
index 6375d68b7913..6375d68b7913 100644
--- a/drivers/serial/8250_fourport.c
+++ b/drivers/tty/serial/8250_fourport.c
diff --git a/drivers/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
index d8c0ffbfa6e3..d8c0ffbfa6e3 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/tty/serial/8250_gsc.c
diff --git a/drivers/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
index c13438c93012..c13438c93012 100644
--- a/drivers/serial/8250_hp300.c
+++ b/drivers/tty/serial/8250_hp300.c
diff --git a/drivers/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
index 7609150e7d5e..7609150e7d5e 100644
--- a/drivers/serial/8250_hub6.c
+++ b/drivers/tty/serial/8250_hub6.c
diff --git a/drivers/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
index d10be944ad44..d10be944ad44 100644
--- a/drivers/serial/8250_mca.c
+++ b/drivers/tty/serial/8250_mca.c
diff --git a/drivers/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 8b8930f700b5..8b8930f700b5 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
diff --git a/drivers/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index 4822cb50cd0f..4822cb50cd0f 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
diff --git a/drivers/serial/Kconfig b/drivers/tty/serial/Kconfig
index c1df7676a73d..b1682d7f1d8a 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -81,7 +81,7 @@ config SERIAL_8250_GSC
default SERIAL_8250
config SERIAL_8250_PCI
- tristate "8250/16550 PCI device support" if EMBEDDED
+ tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
default SERIAL_8250
help
@@ -90,7 +90,7 @@ config SERIAL_8250_PCI
Saves about 9K.
config SERIAL_8250_PNP
- tristate "8250/16550 PNP device support" if EMBEDDED
+ tristate "8250/16550 PNP device support" if EXPERT
depends on SERIAL_8250 && PNP
default SERIAL_8250
help
diff --git a/drivers/serial/Makefile b/drivers/tty/serial/Makefile
index 8ea92e9c73b0..8ea92e9c73b0 100644
--- a/drivers/serial/Makefile
+++ b/drivers/tty/serial/Makefile
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index f9b49b5ff5e1..f9b49b5ff5e1 100644
--- a/drivers/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
diff --git a/drivers/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 721216292a50..721216292a50 100644
--- a/drivers/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
diff --git a/drivers/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
diff --git a/drivers/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..e76d7d000128 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
diff --git a/drivers/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 095a5d562618..095a5d562618 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
diff --git a/drivers/serial/apbuart.h b/drivers/tty/serial/apbuart.h
index 5faf87c8d2bc..5faf87c8d2bc 100644
--- a/drivers/serial/apbuart.h
+++ b/drivers/tty/serial/apbuart.h
diff --git a/drivers/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2a1d52fb4936..2a1d52fb4936 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d0807..a1a0e55d0807 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
diff --git a/drivers/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index e381b895b04d..e381b895b04d 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index e95c524d9d18..e95c524d9d18 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1d5675..6d06ce1d5675 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
diff --git a/drivers/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b6acd19b458e..b6acd19b458e 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
index e072724ea754..e072724ea754 100644
--- a/drivers/serial/cpm_uart/Makefile
+++ b/drivers/tty/serial/cpm_uart/Makefile
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index b754dcf0fda5..b754dcf0fda5 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 8692ff98fc07..8692ff98fc07 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 3fc1d66e32c6..3fc1d66e32c6 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
index 10eecd6af6d4..10eecd6af6d4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index 814ac006393f..814ac006393f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
index 7194c63dcf5f..7194c63dcf5f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
diff --git a/drivers/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index bcc31f2140ac..bcc31f2140ac 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
diff --git a/drivers/serial/crisv10.h b/drivers/tty/serial/crisv10.h
index ea0beb46a10d..ea0beb46a10d 100644
--- a/drivers/serial/crisv10.h
+++ b/drivers/tty/serial/crisv10.h
diff --git a/drivers/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d776329..57421d776329 100644
--- a/drivers/serial/dz.c
+++ b/drivers/tty/serial/dz.c
diff --git a/drivers/serial/dz.h b/drivers/tty/serial/dz.h
index faf169ed27b3..faf169ed27b3 100644
--- a/drivers/serial/dz.h
+++ b/drivers/tty/serial/dz.h
diff --git a/drivers/serial/icom.c b/drivers/tty/serial/icom.c
index 53a468227056..53a468227056 100644
--- a/drivers/serial/icom.c
+++ b/drivers/tty/serial/icom.c
diff --git a/drivers/serial/icom.h b/drivers/tty/serial/icom.h
index c8029e0025c9..c8029e0025c9 100644
--- a/drivers/serial/icom.h
+++ b/drivers/tty/serial/icom.h
diff --git a/drivers/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ab93763862d5..ab93763862d5 100644
--- a/drivers/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
diff --git a/drivers/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index deb7b8d977dc..deb7b8d977dc 100644
--- a/drivers/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
diff --git a/drivers/serial/imx.c b/drivers/tty/serial/imx.c
index dfcf4b1878aa..dfcf4b1878aa 100644
--- a/drivers/serial/imx.c
+++ b/drivers/tty/serial/imx.c
diff --git a/drivers/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index ee43efc7bdcc..ee43efc7bdcc 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
diff --git a/drivers/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index fcfe82653ac8..fcfe82653ac8 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
diff --git a/drivers/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index ebff4a1d4bcc..ebff4a1d4bcc 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
diff --git a/drivers/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
index a59a9a8341d2..a59a9a8341d2 100644
--- a/drivers/serial/ip22zilog.h
+++ b/drivers/tty/serial/ip22zilog.h
diff --git a/drivers/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
index e46b6e0f8b18..e46b6e0f8b18 100644
--- a/drivers/serial/jsm/Makefile
+++ b/drivers/tty/serial/jsm/Makefile
diff --git a/drivers/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 38a509c684cd..38a509c684cd 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c63..18f548449c63 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 7960d9633c15..7960d9633c15 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 7a4a914ecff0..7a4a914ecff0 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
diff --git a/drivers/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 25a8bc565f40..25a8bc565f40 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
diff --git a/drivers/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bea5c215460c..bea5c215460c 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
diff --git a/drivers/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
index e9b7e11793b1..e9b7e11793b1 100644
--- a/drivers/serial/m32r_sio.h
+++ b/drivers/tty/serial/m32r_sio.h
diff --git a/drivers/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..4671473793e3 100644
--- a/drivers/serial/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..beb1afa27d8d 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
diff --git a/drivers/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..a1fe304f2f52 100644
--- a/drivers/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
diff --git a/drivers/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..910870edf708 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
diff --git a/drivers/serial/max3107.h b/drivers/tty/serial/max3107.h
index 7ab632392502..7ab632392502 100644
--- a/drivers/serial/max3107.h
+++ b/drivers/tty/serial/max3107.h
diff --git a/drivers/serial/mcf.c b/drivers/tty/serial/mcf.c
index 3394b7cc1722..3394b7cc1722 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
diff --git a/drivers/serial/mfd.c b/drivers/tty/serial/mfd.c
index d40010a22ecd..d40010a22ecd 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 126ec7f568ec..126ec7f568ec 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
diff --git a/drivers/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 6a9c6605666a..6a9c6605666a 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
diff --git a/drivers/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index b62857bf2fdb..b62857bf2fdb 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
diff --git a/drivers/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
index d1ef43af397c..d1ef43af397c 100644
--- a/drivers/serial/mrst_max3110.h
+++ b/drivers/tty/serial/mrst_max3110.h
diff --git a/drivers/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e64..8e43a7b69e64 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
diff --git a/drivers/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e98..f6ca9ca79e98 100644
--- a/drivers/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
diff --git a/drivers/serial/mux.c b/drivers/tty/serial/mux.c
index 9711e06a8374..9711e06a8374 100644
--- a/drivers/serial/mux.c
+++ b/drivers/tty/serial/mux.c
diff --git a/drivers/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 7735c9f35fa0..7735c9f35fa0 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
diff --git a/drivers/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index de173671e3d0..de173671e3d0 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
diff --git a/drivers/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5c7abe4c94dd..5c7abe4c94dd 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
diff --git a/drivers/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7f2f01058789..7f2f01058789 100644
--- a/drivers/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
diff --git a/drivers/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 70a61458ec42..70a61458ec42 100644
--- a/drivers/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
diff --git a/drivers/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5b9cde79e4ea..5b9cde79e4ea 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
diff --git a/drivers/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fbb1b20..cbc34fbb1b20 100644
--- a/drivers/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 0aa75a97531c..0aa75a97531c 100644
--- a/drivers/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
diff --git a/drivers/serial/pxa.c b/drivers/tty/serial/pxa.c
index 1102a39b44f5..1102a39b44f5 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
diff --git a/drivers/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
index fed1a9a1ffb4..fed1a9a1ffb4 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/tty/serial/s3c2400.c
diff --git a/drivers/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
index 73f089d3efd6..73f089d3efd6 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/tty/serial/s3c2410.c
diff --git a/drivers/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
index 1700b1a2fb7e..1700b1a2fb7e 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/tty/serial/s3c2412.c
diff --git a/drivers/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
index 094cc3904b13..094cc3904b13 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/tty/serial/s3c2440.c
diff --git a/drivers/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
index fad6083ca427..fad6083ca427 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/tty/serial/s3c24a0.c
diff --git a/drivers/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
index 4be92ab50058..4be92ab50058 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/tty/serial/s3c6400.c
diff --git a/drivers/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index 6ebccd70a707..6ebccd70a707 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
diff --git a/drivers/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 2199d819a987..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
diff --git a/drivers/serial/samsung.c b/drivers/tty/serial/samsung.c
index 2335edafe903..2335edafe903 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
diff --git a/drivers/serial/samsung.h b/drivers/tty/serial/samsung.h
index 0ac06a07d25f..0ac06a07d25f 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
diff --git a/drivers/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index a2f2b3254499..a2f2b3254499 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
diff --git a/drivers/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad2b242..75038ad2b242 100644
--- a/drivers/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
diff --git a/drivers/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 460a72d91bb7..460a72d91bb7 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
diff --git a/drivers/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..93760b2ea172 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
diff --git a/drivers/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index b1962025b1aa..b1962025b1aa 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
index ea744707c4d6..ea744707c4d6 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/tty/serial/serial_lh7a40x.c
diff --git a/drivers/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index c50e9fbbf743..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
diff --git a/drivers/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 92c91c83edde..92c91c83edde 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
diff --git a/drivers/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b223d6cbf33a..b223d6cbf33a 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
diff --git a/drivers/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index cff9a306660f..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
diff --git a/drivers/serial/suncore.c b/drivers/tty/serial/suncore.c
index 6381a0282ee7..6381a0282ee7 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
diff --git a/drivers/serial/suncore.h b/drivers/tty/serial/suncore.h
index db2057936c31..db2057936c31 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/tty/serial/suncore.h
diff --git a/drivers/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c9014868297d..c9014868297d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
diff --git a/drivers/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5b246b18f42f..5b246b18f42f 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
diff --git a/drivers/serial/sunsab.h b/drivers/tty/serial/sunsab.h
index b78e1f7b8050..b78e1f7b8050 100644
--- a/drivers/serial/sunsab.h
+++ b/drivers/tty/serial/sunsab.h
diff --git a/drivers/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 551ebfe3ccbb..551ebfe3ccbb 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
diff --git a/drivers/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c1967ac1c07f..c1967ac1c07f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
diff --git a/drivers/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
index 5dec7b47cc38..5dec7b47cc38 100644
--- a/drivers/serial/sunzilog.h
+++ b/drivers/tty/serial/sunzilog.h
diff --git a/drivers/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 1f36b7eb7351..1f36b7eb7351 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
diff --git a/drivers/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index 7e566766bc43..7e566766bc43 100644
--- a/drivers/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
diff --git a/drivers/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d2fce865b731..d2fce865b731 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
diff --git a/drivers/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3f4848e2174a..3f4848e2174a 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab4fa68..3beb6ab4fa68 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
diff --git a/drivers/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 322bf56c0d89..322bf56c0d89 100644
--- a/drivers/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
diff --git a/drivers/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e70315..1a7fd3e70315 100644
--- a/drivers/serial/zs.c
+++ b/drivers/tty/serial/zs.c
diff --git a/drivers/serial/zs.h b/drivers/tty/serial/zs.h
index aa921b57d827..aa921b57d827 100644
--- a/drivers/serial/zs.h
+++ b/drivers/tty/serial/zs.h
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index bcc24779ba0e..18d02e32a3d5 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -123,9 +123,9 @@ config USB_OTG
config USB_OTG_WHITELIST
bool "Rely on OTG Targeted Peripherals List"
- depends on USB_OTG || EMBEDDED
+ depends on USB_OTG || EXPERT
default y if USB_OTG
- default n if EMBEDDED
+ default n if EXPERT
help
If you say Y here, the "otg_whitelist.h" file will be used as a
product whitelist, so USB peripherals not listed there will be
@@ -141,7 +141,7 @@ config USB_OTG_WHITELIST
config USB_OTG_BLACKLIST_HUB
bool "Disable external hubs"
- depends on USB_OTG || EMBEDDED
+ depends on USB_OTG || EXPERT
help
If you say Y here, then Linux will refuse to enumerate
external hubs. OTG hosts are allowed to reduce hardware
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d916ac04abab..6bafb51bb437 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1227,7 +1227,7 @@ config FB_CARILLO_RANCH
config FB_INTEL
tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED
+ depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c789c46e38af..b224396b86d5 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -21,7 +21,7 @@
#define MAX_BRIGHTNESS (0xFF)
#define MIN_BRIGHTNESS (0)
-#define CURRENT_MASK (0x1F << 1)
+#define CURRENT_BITMASK (0x1F << 1)
struct pm860x_backlight_data {
struct pm860x_chip *chip;
@@ -85,7 +85,7 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
if ((data->current_brightness == 0) && brightness) {
if (data->iset) {
ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
- CURRENT_MASK, data->iset);
+ CURRENT_BITMASK, data->iset);
if (ret < 0)
goto out;
}
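The rename from `CURRENT_MASK` to `CURRENT_BITMASK` only avoids a name clash; the value `(0x1F << 1)` still selects bits 1..5 of the register. A hedged sketch of the read-modify-write that a `set_bits(reg, mask, value)` style helper typically performs (the helper here is illustrative, not the 88pm860x API):

```c
#include <stdint.h>
#include <stdio.h>

#define CURRENT_BITMASK	(0x1F << 1)	/* bits 1..5 carry the current setting */

/* Illustrative masked update: clear the masked bits, then OR in the new value. */
static uint8_t set_bits(uint8_t reg, uint8_t mask, uint8_t value)
{
	return (uint8_t)((reg & ~mask) | (value & mask));
}

int main(void)
{
	uint8_t reg = 0xC1;			/* unrelated bits 0, 6, 7 are set */
	uint8_t iset = 3 << 1;			/* desired current code in bits 1..5 */

	reg = set_bits(reg, CURRENT_BITMASK, iset);
	printf("register now 0x%02X\n", reg);	/* bits outside the mask untouched */
	return 0;
}
```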
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5a35f22372b9..2209e354f531 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -5,7 +5,7 @@
menu "Console display driver support"
config VGA_CONSOLE
- bool "VGA text console" if EMBEDDED || !X86
+ bool "VGA text console" if EXPERT || !X86
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
default y
help
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index ef8d9d558fc7..4fb5b2bf2348 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
-/* A PCI device has it's own struct device and so does a virtio device so
- * we create a place for the virtio devices to show up in sysfs. I think it
- * would make more sense for virtio to not insist on having it's own device. */
-static struct device *virtio_pci_root;
-
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
@@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
if (vp_dev == NULL)
return -ENOMEM;
- vp_dev->vdev.dev.parent = virtio_pci_root;
+ vp_dev->vdev.dev.parent = &pci_dev->dev;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
@@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = {
static int __init virtio_pci_init(void)
{
- int err;
-
- virtio_pci_root = root_device_register("virtio-pci");
- if (IS_ERR(virtio_pci_root))
- return PTR_ERR(virtio_pci_root);
-
- err = pci_register_driver(&virtio_pci_driver);
- if (err)
- root_device_unregister(virtio_pci_root);
-
- return err;
+ return pci_register_driver(&virtio_pci_driver);
}
module_init(virtio_pci_init);
@@ -735,7 +720,6 @@ module_init(virtio_pci_init);
static void __exit virtio_pci_exit(void)
{
pci_unregister_driver(&virtio_pci_driver);
- root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 1c1236087f78..bbd000f88af7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -122,6 +122,7 @@ static ssize_t xenbus_file_read(struct file *filp,
int ret;
mutex_lock(&u->reply_mutex);
+again:
while (list_empty(&u->read_buffers)) {
mutex_unlock(&u->reply_mutex);
if (filp->f_flags & O_NONBLOCK)
@@ -144,7 +145,7 @@ static ssize_t xenbus_file_read(struct file *filp,
i += sz - ret;
rb->cons += sz - ret;
- if (ret != sz) {
+ if (ret != 0) {
if (i == 0)
i = -EFAULT;
goto out;
@@ -160,6 +161,8 @@ static ssize_t xenbus_file_read(struct file *filp,
struct read_buffer, list);
}
}
+ if (i == 0)
+ goto again;
out:
mutex_unlock(&u->reply_mutex);
@@ -407,6 +410,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
mutex_lock(&u->reply_mutex);
rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+ wake_up(&u->read_waitq);
mutex_unlock(&u->reply_mutex);
}
@@ -455,7 +459,7 @@ static ssize_t xenbus_file_write(struct file *filp,
ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
- if (ret == len) {
+ if (ret != 0) {
rc = -EFAULT;
goto out;
}
@@ -488,21 +492,6 @@ static ssize_t xenbus_file_write(struct file *filp,
msg_type = u->u.msg.type;
switch (msg_type) {
- case XS_TRANSACTION_START:
- case XS_TRANSACTION_END:
- case XS_DIRECTORY:
- case XS_READ:
- case XS_GET_PERMS:
- case XS_RELEASE:
- case XS_GET_DOMAIN_PATH:
- case XS_WRITE:
- case XS_MKDIR:
- case XS_RM:
- case XS_SET_PERMS:
- /* Send out a transaction */
- ret = xenbus_write_transaction(msg_type, u);
- break;
-
case XS_WATCH:
case XS_UNWATCH:
/* (Un)Ask for some path to be watched for changes */
@@ -510,7 +499,8 @@ static ssize_t xenbus_file_write(struct file *filp,
break;
default:
- ret = -EINVAL;
+ /* Send out a transaction */
+ ret = xenbus_write_transaction(msg_type, u);
break;
}
if (ret != 0)
@@ -555,6 +545,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
struct xenbus_file_priv *u = filp->private_data;
struct xenbus_transaction_holder *trans, *tmp;
struct watch_adapter *watch, *tmp_watch;
+ struct read_buffer *rb, *tmp_rb;
/*
* No need for locking here because there are no other users,
@@ -573,6 +564,10 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
free_watch_adapter(watch);
}
+ list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+ list_del(&rb->list);
+ kfree(rb);
+ }
kfree(u);
return 0;
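Two of the hunks above hinge on the same convention: `copy_to_user()`/`copy_from_user()` return the number of bytes that could not be copied, so success is `ret == 0`; the old checks compared the return value against the transfer length rather than against zero, mishandling partial copies. A small user-space model of that convention and the corrected check:

```c
#include <stdio.h>
#include <string.h>

/*
 * Model of the kernel convention: returns how many bytes were NOT copied
 * (0 on full success). This is a stand-in, not the real copy_from_user().
 */
static size_t fake_copy(char *dst, const char *src, size_t len, size_t fault_after)
{
	size_t ok = len < fault_after ? len : fault_after;

	memcpy(dst, src, ok);
	return len - ok;	/* bytes left uncopied */
}

int main(void)
{
	char buf[16];
	size_t ret = fake_copy(buf, "hello world", 11, 5); /* fault after 5 bytes */

	if (ret != 0)		/* correct: any nonzero remainder is a fault */
		printf("partial copy, %zu bytes missing -> -EFAULT\n", ret);

	if (ret == 11)		/* old write-path check: only a total failure trips */
		printf("this branch is never taken for partial copies\n");
	return 0;
}
```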
diff --git a/fs/Kconfig b/fs/Kconfig
index 9a7921ae4763..3db9caa57edc 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -50,7 +50,7 @@ config EXPORTFS
tristate
config FILE_LOCKING
- bool "Enable POSIX file locking API" if EMBEDDED
+ bool "Enable POSIX file locking API" if EXPERT
default y
help
This option enables standard file locking support, required
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index ede98300a8cd..65829d32128c 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -79,11 +79,11 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
- mid_entry->tsk,
+ mid_entry->callback_data,
mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
@@ -218,11 +218,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
mid_entry = list_entry(tmp3, struct mid_q_entry,
qhead);
seq_printf(m, "\tState: %d com: %d pid:"
- " %d tsk: %p mid %d\n",
+ " %d cbdata: %p mid %d\n",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
- mid_entry->tsk,
+ mid_entry->callback_data,
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
@@ -331,7 +331,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
atomic_read(&totSmBufAllocCount));
#endif /* CONFIG_CIFS_STATS2 */
- seq_printf(m, "Operations (MIDs): %d\n", midCount.counter);
+ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
seq_printf(m,
"\n%d session %d share reconnects\n",
tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
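The stats hunk swaps a direct read of `midCount.counter` for `atomic_read(&midCount)`, keeping all access to the atomic behind its accessors. An analogous user-space sketch with C11 `stdatomic.h` (illustrative only; the kernel's `atomic_t` is a different type):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mid_count;	/* analogue of the module's midCount */

static void on_request_sent(void)
{
	atomic_fetch_add(&mid_count, 1);	/* like atomic_inc() */
}

static void on_response_received(void)
{
	atomic_fetch_sub(&mid_count, 1);	/* like atomic_dec() */
}

int main(void)
{
	on_request_sent();
	on_request_sent();
	on_response_received();

	/* Always read through the accessor rather than poking at internals. */
	printf("Operations (MIDs): %d\n", atomic_load(&mid_count));
	return 0;
}
```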
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 7852cd677051..ac51cd2d33ae 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -40,6 +40,7 @@
#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */
#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
+#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 430f510a1720..fc0fd4fde306 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,10 +44,14 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
int charlen, outlen = 0;
int maxwords = maxbytes / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
+ __u16 ftmp;
- for (i = 0; i < maxwords && from[i]; i++) {
- charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
- NLS_MAX_CHARSET_SIZE);
+ for (i = 0; i < maxwords; i++) {
+ ftmp = get_unaligned_le16(&from[i]);
+ if (ftmp == 0)
+ break;
+
+ charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
if (charlen > 0)
outlen += charlen;
else
@@ -58,9 +62,9 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
}
/*
- * cifs_mapchar - convert a little-endian char to proper char in codepage
+ * cifs_mapchar - convert a host-endian char to proper char in codepage
* @target - where converted character should be copied
- * @src_char - 2 byte little-endian source character
+ * @src_char - 2 byte host-endian source character
* @cp - codepage to which character should be converted
* @mapchar - should character be mapped according to mapchars mount option?
*
@@ -69,7 +73,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
* enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
*/
static int
-cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
bool mapchar)
{
int len = 1;
@@ -82,7 +86,7 @@ cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
* build_path_from_dentry are modified, as they use slash as
* separator.
*/
- switch (le16_to_cpu(src_char)) {
+ switch (src_char) {
case UNI_COLON:
*target = ':';
break;
@@ -109,8 +113,7 @@ out:
return len;
cp_convert:
- len = cp->uni2char(le16_to_cpu(src_char), target,
- NLS_MAX_CHARSET_SIZE);
+ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
if (len <= 0) {
*target = '?';
len = 1;
@@ -149,6 +152,7 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
int nullsize = nls_nullsize(codepage);
int fromwords = fromlen / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
+ __u16 ftmp;
/*
* because the chars can be of varying widths, we need to take care
@@ -158,19 +162,23 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
*/
safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
- for (i = 0; i < fromwords && from[i]; i++) {
+ for (i = 0; i < fromwords; i++) {
+ ftmp = get_unaligned_le16(&from[i]);
+ if (ftmp == 0)
+ break;
+
/*
* check to see if converting this character might make the
* conversion bleed into the null terminator
*/
if (outlen >= safelen) {
- charlen = cifs_mapchar(tmp, from[i], codepage, mapchar);
+ charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
if ((outlen + charlen) > (tolen - nullsize))
break;
}
/* put converted char into 'to' buffer */
- charlen = cifs_mapchar(&to[outlen], from[i], codepage, mapchar);
+ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
outlen += charlen;
}
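The conversion loops above stop indexing `from[]` directly and instead fetch each 16-bit code point with `get_unaligned_le16()`, because the UCS-2 string in an SMB buffer may not be 2-byte aligned. A portable user-space sketch of such an accessor (memcpy-based, which compilers reduce to a plain load where alignment allows):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian u16 from a possibly unaligned address. */
static uint16_t load_le16(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, 2);
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* "Hi" as little-endian UCS-2, deliberately placed at an odd offset. */
	uint8_t buf[] = { 0x00, 'H', 0x00, 'i', 0x00, 0x00, 0x00 };
	const uint8_t *ucs = buf + 1;
	uint16_t cp;

	for (size_t i = 0; (cp = load_le16(ucs + 2 * i)) != 0; i++)
		printf("code point %zu: U+%04X\n", i, cp);
	return 0;
}
```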
@@ -193,24 +201,21 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
{
int charlen;
int i;
- wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+ wchar_t wchar_to; /* needed to quiet sparse */
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
-
- /* works for 2.4.0 kernel or later */
- charlen = codepage->char2uni(from, len, &wchar_to[i]);
+ charlen = codepage->char2uni(from, len, &wchar_to);
if (charlen < 1) {
- cERROR(1, "strtoUCS: char2uni of %d returned %d",
- (int)*from, charlen);
+ cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+ *from, charlen);
/* A question mark */
- to[i] = cpu_to_le16(0x003f);
+ wchar_to = 0x003f;
charlen = 1;
- } else
- to[i] = cpu_to_le16(wchar_to[i]);
-
+ }
+ put_unaligned_le16(wchar_to, &to[i]);
}
- to[i] = 0;
+ put_unaligned_le16(0, &to[i]);
return i;
}
@@ -252,3 +257,79 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
return dst;
}
+/*
+ * Convert 16 bit Unicode pathname to wire format from string in current code
+ * page. Conversion may involve remapping up the six characters that are
+ * only legal in POSIX-like OS (if they are present in the string). Path
+ * names are little endian 16 bit Unicode on the wire
+ */
+int
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+ const struct nls_table *cp, int mapChars)
+{
+ int i, j, charlen;
+ int len_remaining = maxlen;
+ char src_char;
+ __u16 temp;
+
+ if (!mapChars)
+ return cifs_strtoUCS(target, source, PATH_MAX, cp);
+
+ for (i = 0, j = 0; i < maxlen; j++) {
+ src_char = source[i];
+ switch (src_char) {
+ case 0:
+ put_unaligned_le16(0, &target[j]);
+ goto ctoUCS_out;
+ case ':':
+ temp = UNI_COLON;
+ break;
+ case '*':
+ temp = UNI_ASTERIK;
+ break;
+ case '?':
+ temp = UNI_QUESTION;
+ break;
+ case '<':
+ temp = UNI_LESSTHAN;
+ break;
+ case '>':
+ temp = UNI_GRTRTHAN;
+ break;
+ case '|':
+ temp = UNI_PIPE;
+ break;
+ /*
+ * FIXME: We can not handle remapping backslash (UNI_SLASH)
+ * until all the calls to build_path_from_dentry are modified,
+ * as they use backslash as separator.
+ */
+ default:
+ charlen = cp->char2uni(source+i, len_remaining,
+ &temp);
+ /*
+ * if no match, use question mark, which at least in
+ * some cases serves as wild card
+ */
+ if (charlen < 1) {
+ temp = 0x003f;
+ charlen = 1;
+ }
+ len_remaining -= charlen;
+ /*
+ * character may take more than one byte in the source
+ * string, but will take exactly two bytes in the
+ * target string
+ */
+ i += charlen;
+ continue;
+ }
+ put_unaligned_le16(temp, &target[j]);
+ i++; /* move to next char in source string */
+ len_remaining--;
+ }
+
+ctoUCS_out:
+ return i;
+}
+
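`cifsConvertToUCS()` special-cases the characters that are legal in POSIX paths but reserved on the wire (`: * ? < > |`), substituting private-use code points for them; everything else goes through the codepage's `char2uni()`. A toy sketch of the remapping step alone, with hypothetical replacement values rather than the real `UNI_*` constants and an ASCII pass-through instead of a codepage conversion:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical substitutes; the driver uses its own UNI_* private-use values. */
static uint16_t remap_reserved(char c)
{
	switch (c) {
	case ':': return 0xF022;
	case '*': return 0xF023;
	case '?': return 0xF024;
	case '<': return 0xF025;
	case '>': return 0xF026;
	case '|': return 0xF027;
	default:  return (uint16_t)(unsigned char)c;	/* pass ASCII through */
	}
}

int main(void)
{
	const char *name = "a:b?.txt";

	for (const char *p = name; *p; p++)
		printf("%c -> U+%04X\n", *p, remap_reserved(*p));
	return 0;
}
```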
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index a437ec391a01..1e7636b145a8 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -41,9 +41,12 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
;
-/* security id for everyone */
+/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct cifs_sid sid_authusers = {
+ 1, 1, {0, 0, 0, 0, 0, 5}, {11} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
@@ -365,7 +368,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
if (num_aces > 0) {
umode_t user_mask = S_IRWXU;
umode_t group_mask = S_IRWXG;
- umode_t other_mask = S_IRWXO;
+ umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
GFP_KERNEL);
@@ -390,6 +393,12 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
ppace[i]->type,
&fattr->cf_mode,
&other_mask);
+ if (compare_sids(&(ppace[i]->sid), &sid_authusers))
+ access_flags_to_mode(ppace[i]->access_req,
+ ppace[i]->type,
+ &fattr->cf_mode,
+ &other_mask);
+
/* memcpy((void *)(&(cifscred->aces[i])),
(void *)ppace[i],
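The DACL parser above now also honours ACEs granted to the "Authenticated Users" well-known SID (authority 5, sub-authority 11, as defined by the new `sid_authusers`), applying them to the others mode bits just like "Everyone" ACEs. A rough user-space sketch of comparing two SIDs field by field, using an illustrative struct rather than the driver's `struct cifs_sid` and making no claim about `compare_sids()`'s exact return convention:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative SID layout (not the driver's struct cifs_sid). */
struct sid {
	uint8_t  revision;
	uint8_t  num_subauth;
	uint8_t  authority[6];
	uint32_t sub_auth[5];
};

/* S-1-1-0 "Everyone" and S-1-5-11 "Authenticated Users". */
static const struct sid sid_everyone  = { 1, 1, {0, 0, 0, 0, 0, 1}, {0}  };
static const struct sid sid_authusers = { 1, 1, {0, 0, 0, 0, 0, 5}, {11} };

/* Structural equality: same revision, authority and sub-authority list. */
static int sids_equal(const struct sid *a, const struct sid *b)
{
	if (a->revision != b->revision || a->num_subauth != b->num_subauth)
		return 0;
	if (memcmp(a->authority, b->authority, sizeof(a->authority)) != 0)
		return 0;
	return memcmp(a->sub_auth, b->sub_auth,
		      a->num_subauth * sizeof(a->sub_auth[0])) == 0;
}

int main(void)
{
	printf("everyone == authusers? %d\n",
	       sids_equal(&sid_everyone, &sid_authusers));	/* prints 0 */
	printf("authusers == authusers? %d\n",
	       sids_equal(&sid_authusers, &sid_authusers));	/* prints 1 */
	return 0;
}
```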
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d9f652a522a6..a8323f1dc1c4 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -77,7 +77,11 @@ unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
"Default: 50 Range: 2 to 256");
-
+unsigned short echo_retries = 5;
+module_param(echo_retries, ushort, 0644);
+MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+ "reconnecting server. Default: 5. 0 means "
+ "never reconnect.");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
@@ -729,6 +733,25 @@ const struct file_operations cifs_file_ops = {
.setlease = cifs_setlease,
};
+const struct file_operations cifs_file_strict_ops = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = cifs_strict_readv,
+ .aio_write = cifs_file_aio_write,
+ .open = cifs_open,
+ .release = cifs_close,
+ .lock = cifs_lock,
+ .fsync = cifs_strict_fsync,
+ .flush = cifs_flush,
+ .mmap = cifs_file_strict_mmap,
+ .splice_read = generic_file_splice_read,
+ .llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+ .unlocked_ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+ .setlease = cifs_setlease,
+};
+
const struct file_operations cifs_file_direct_ops = {
/* no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
@@ -747,6 +770,7 @@ const struct file_operations cifs_file_direct_ops = {
.llseek = cifs_llseek,
.setlease = cifs_setlease,
};
+
const struct file_operations cifs_file_nobrl_ops = {
.read = do_sync_read,
.write = do_sync_write,
@@ -765,6 +789,24 @@ const struct file_operations cifs_file_nobrl_ops = {
.setlease = cifs_setlease,
};
+const struct file_operations cifs_file_strict_nobrl_ops = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = cifs_strict_readv,
+ .aio_write = cifs_file_aio_write,
+ .open = cifs_open,
+ .release = cifs_close,
+ .fsync = cifs_strict_fsync,
+ .flush = cifs_flush,
+ .mmap = cifs_file_strict_mmap,
+ .splice_read = generic_file_splice_read,
+ .llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+ .unlocked_ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+ .setlease = cifs_setlease,
+};
+
const struct file_operations cifs_file_direct_nobrl_ops = {
/* no mmap, no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
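The new `cifs_file_strict_ops` and `cifs_file_strict_nobrl_ops` tables differ from the regular ones only in the hooks that touch the page cache (`aio_read`, `fsync`, `mmap`), so strict cache behaviour is selected simply by pointing an open file at a different operations table. A toy sketch of that dispatch-table idea in user-space C (names and behaviours here are invented for the illustration):

```c
#include <stdio.h>

/* A tiny analogue of file_operations: just the hooks we care about. */
struct file_ops {
	const char *name;
	void (*read)(void);
	void (*fsync)(void);
};

static void cached_read(void)  { puts("read via local cache"); }
static void cached_fsync(void) { puts("flush dirty data, then fsync"); }
static void strict_read(void)  { puts("revalidate cache before reading"); }
static void strict_fsync(void) { puts("invalidate cache, then fsync"); }

static const struct file_ops default_ops = { "default", cached_read, cached_fsync };
static const struct file_ops strict_ops  = { "strict",  strict_read, strict_fsync };

int main(void)
{
	int strict_mount = 1;	/* would come from a flag such as CIFS_MOUNT_STRICT_IO */
	const struct file_ops *ops = strict_mount ? &strict_ops : &default_ops;

	printf("using %s ops\n", ops->name);
	ops->read();
	ops->fsync();
	return 0;
}
```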
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 851030f74939..f23206d46531 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,6 +61,7 @@ extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
struct dentry *);
extern int cifs_revalidate_file(struct file *filp);
extern int cifs_revalidate_dentry(struct dentry *);
+extern void cifs_invalidate_mapping(struct inode *inode);
extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int cifs_setattr(struct dentry *, struct iattr *);
@@ -72,19 +73,25 @@ extern const struct inode_operations cifs_dfs_referral_inode_operations;
/* Functions related to files and directories */
extern const struct file_operations cifs_file_ops;
extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
-extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_direct_nobrl_ops;
+extern const struct file_operations cifs_file_strict_nobrl_ops;
extern int cifs_open(struct inode *inode, struct file *file);
extern int cifs_close(struct inode *inode, struct file *file);
extern int cifs_closedir(struct inode *inode, struct file *file);
extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
- size_t read_size, loff_t *poffset);
+ size_t read_size, loff_t *poffset);
+extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
size_t write_size, loff_t *poffset);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, int);
+extern int cifs_strict_fsync(struct file *, int);
extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
@@ -118,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.68"
+#define CIFS_VERSION "1.69"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 606ca8bb7102..5bfb75346cb0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -161,6 +161,7 @@ struct TCP_Server_Info {
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
struct sockaddr_storage dstaddr;
@@ -168,25 +169,16 @@ struct TCP_Server_Info {
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
struct list_head pending_mid_q;
- void *Server_NlsInfo; /* BB - placeholder for future NLS info */
- unsigned short server_codepage; /* codepage for the server */
- enum protocolEnum protocolType;
- char versionMajor;
- char versionMinor;
- bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
-#ifdef CONFIG_CIFS_STATS2
- atomic_t inSend; /* requests trying to send */
- atomic_t num_waiters; /* blocked waiting to get in sendrecv */
-#endif
- enum statusEnum tcpStatus; /* what we think the status is */
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
char secMode;
+ bool session_estab; /* mark when very first sess is established */
+ u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
unsigned int maxReq; /* Clients should submit no more */
/* than maxReq distinct unanswered SMBs to the server when using */
@@ -199,8 +191,6 @@ struct TCP_Server_Info {
unsigned int max_vcs; /* maximum number of smb sessions, at least
those that can be specified uniquely with
vcnumbers */
- char sessid[4]; /* unique token id for this session */
- /* (returned on Negotiate */
int capabilities; /* allow selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u16 CurrentMid; /* multiplex id - rotating counter */
@@ -210,17 +200,20 @@ struct TCP_Server_Info {
__u32 sequence_number; /* for signing, protected by srv_mutex */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
- u16 dialect; /* dialect index that server chose */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
/* extended security flavors that server supports */
+ bool sec_ntlmssp; /* supports NTLMSSP */
+ bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
- bool sec_kerberosu2u; /* supports U2U Kerberos */
- bool sec_ntlmssp; /* supports NTLMSSP */
- bool session_estab; /* mark when very first sess is established */
+ struct delayed_work echo; /* echo ping workqueue job */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
+#ifdef CONFIG_CIFS_STATS2
+ atomic_t inSend; /* requests trying to send */
+ atomic_t num_waiters; /* blocked waiting to get in sendrecv */
+#endif
};
/*
@@ -446,11 +439,11 @@ struct cifsInodeInfo {
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
- unsigned long time; /* jiffies of last update/check of inode */
- bool clientCanCacheRead:1; /* read oplock */
- bool clientCanCacheAll:1; /* read and writebehind oplock */
- bool delete_pending:1; /* DELETE_ON_CLOSE is set */
- bool invalid_mapping:1; /* pagecache is invalid */
+ bool clientCanCacheRead; /* read oplock */
+ bool clientCanCacheAll; /* read and writebehind oplock */
+ bool delete_pending; /* DELETE_ON_CLOSE is set */
+ bool invalid_mapping; /* pagecache is invalid */
+ unsigned long time; /* jiffies of last update of inode */
u64 server_eof; /* current file size on server */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
@@ -508,6 +501,18 @@ static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
#endif
+struct mid_q_entry;
+
+/*
+ * This is the prototype for the mid callback function. When creating one,
+ * take special care to avoid deadlocks. Things to bear in mind:
+ *
+ * - it will be called by cifsd
+ * - the GlobalMid_Lock will be held
+ * - the mid will be removed from the pending_mid_q list
+ */
+typedef void (mid_callback_t)(struct mid_q_entry *mid);
+
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
@@ -519,7 +524,8 @@ struct mid_q_entry {
unsigned long when_sent; /* time when smb send finished */
unsigned long when_received; /* when demux complete (taken off wire) */
#endif
- struct task_struct *tsk; /* task waiting for response */
+ mid_callback_t *callback; /* call completion callback */
+ void *callback_data; /* general purpose pointer for callback */
struct smb_hdr *resp_buf; /* response buffer */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
@@ -622,12 +628,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define CIFS_IOVEC 4 /* array of response buffers */
/* Type of Request to SendReceive2 */
-#define CIFS_STD_OP 0 /* normal request timeout */
-#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */
-#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */
-#define CIFS_BLOCKING_OP 4 /* operation can block */
-#define CIFS_ASYNC_OP 8 /* do not wait for response */
-#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */
+#define CIFS_BLOCKING_OP 1 /* operation can block */
+#define CIFS_ASYNC_OP 2 /* do not wait for response */
+#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
#define CIFS_NO_RESP 0x040 /* no response buffer required */
@@ -790,6 +793,9 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+/* reconnect after this many failed echo attempts */
+GLOBAL_EXTERN unsigned short echo_retries;
+
void cifs_oplock_break(struct work_struct *work);
void cifs_oplock_break_get(struct cifsFileInfo *cfile);
void cifs_oplock_break_put(struct cifsFileInfo *cfile);
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de36b09763a8..b5c8cc5d7a7f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -23,6 +23,7 @@
#define _CIFSPDU_H
#include <net/sock.h>
+#include <asm/unaligned.h>
#include "smbfsctl.h"
#ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -50,6 +51,7 @@
#define SMB_COM_SETATTR 0x09 /* trivial response */
#define SMB_COM_LOCKING_ANDX 0x24 /* trivial response */
#define SMB_COM_COPY 0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO 0x2B /* echo request */
#define SMB_COM_OPEN_ANDX 0x2D /* Legacy open for old servers */
#define SMB_COM_READ_ANDX 0x2E
#define SMB_COM_WRITE_ANDX 0x2F
@@ -425,11 +427,49 @@ struct smb_hdr {
__u16 Mid;
__u8 WordCount;
} __attribute__((packed));
-/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+
+/* given a pointer to an smb_hdr retrieve a char pointer to the byte count */
+#define BCC(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + \
+ (2 * (smb_var)->WordCount))
+
/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
+#define pByteArea(smb_var) (BCC(smb_var) + 2)
+
+/* get the converted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc(struct smb_hdr *hdr)
+{
+ __u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+ return get_unaligned(bc_ptr);
+}
+
+/* get the unconverted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc_le(struct smb_hdr *hdr)
+{
+ __le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+ return get_unaligned_le16(bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in host-byte order */
+static inline void
+put_bcc(__u16 count, struct smb_hdr *hdr)
+{
+ __u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+ put_unaligned(count, bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in little-endian */
+static inline void
+put_bcc_le(__u16 count, struct smb_hdr *hdr)
+{
+ __le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+ put_unaligned_le16(count, bc_ptr);
+}
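A small userspace sketch of why the helpers above go through get_unaligned_le16()/put_unaligned_le16(): the byte count sits at a word-count-dependent and possibly odd offset, so it must be accessed byte-wise and in little-endian order. The packet layout below is invented for illustration:

#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const unsigned char *p)
{
        /* byte-wise access is both alignment- and endian-safe */
        return (uint16_t)(p[0] | (p[1] << 8));
}

static void put_le16(unsigned char *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
}

int main(void)
{
        unsigned char pkt[8] = { 0 };
        unsigned int wct = 1;                   /* one parameter word */
        unsigned char *bcc = pkt + 1 + 2 * wct; /* byte count follows the words */

        put_le16(bcc, 3);
        printf("bcc offset=%d value=%u\n", (int)(bcc - pkt), (unsigned)get_le16(bcc));
        return 0;
}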
/*
* Computer Name Length (since Netbios name was length 16 with last byte 0x20)
@@ -760,6 +800,20 @@ typedef struct smb_com_tconx_rsp_ext {
*
*/
+typedef struct smb_com_echo_req {
+ struct smb_hdr hdr;
+ __le16 EchoCount;
+ __le16 ByteCount;
+ char Data[1];
+} __attribute__((packed)) ECHO_REQ;
+
+typedef struct smb_com_echo_rsp {
+ struct smb_hdr hdr;
+ __le16 SequenceNumber;
+ __le16 ByteCount;
+ char Data[1];
+} __attribute__((packed)) ECHO_RSP;
+
typedef struct smb_com_logoff_andx_req {
struct smb_hdr hdr; /* wct = 2 */
__u8 AndXCommand;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e6d1481b16c1..982895fa7615 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -61,6 +61,12 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
+extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
+ struct TCP_Server_Info *server);
+extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+ struct smb_hdr *in_buf, mid_callback_t *callback,
+ void *cbdata);
extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
@@ -347,12 +353,13 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
- const bool waitFlag);
+ const bool waitFlag, const __u8 oplock_level);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
const __u16 lock_type, const bool waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBEcho(struct TCP_Server_Info *server);
extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
extern struct cifsSesInfo *sesInfoAlloc(void);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2f6795e524d3..3106f5e5c633 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -331,37 +331,35 @@ smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
static int validate_t2(struct smb_t2_rsp *pSMB)
{
- int rc = -EINVAL;
- int total_size;
- char *pBCC;
+ unsigned int total_size;
+
+ /* check for plausible wct */
+ if (pSMB->hdr.WordCount < 10)
+ goto vt2_err;
- /* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
- if (pSMB->hdr.WordCount >= 10) {
- if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
- (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
- /* check that bcc is at least as big as parms + data */
- /* check that bcc is less than negotiated smb buffer */
- total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
- if (total_size < 512) {
- total_size +=
- le16_to_cpu(pSMB->t2_rsp.DataCount);
- /* BCC le converted in SendReceive */
- pBCC = (pSMB->hdr.WordCount * 2) +
- sizeof(struct smb_hdr) +
- (char *)pSMB;
- if ((total_size <= (*(u16 *)pBCC)) &&
- (total_size <
- CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
- return 0;
- }
- }
- }
- }
+ if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
+ get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
+ goto vt2_err;
+
+ /* check that bcc is at least as big as parms + data */
+ /* check that bcc is less than negotiated smb buffer */
+ total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
+ if (total_size >= 512)
+ goto vt2_err;
+
+ total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
+ if (total_size > get_bcc(&pSMB->hdr) ||
+ total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
+ goto vt2_err;
+
+ return 0;
+vt2_err:
cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
sizeof(struct smb_t2_rsp) + 16);
- return rc;
+ return -EINVAL;
}
+
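The rewritten validate_t2() uses early exits through a single error label instead of nested conditionals. A standalone sketch of the same shape, with made-up limits and a made-up response layout:

#include <stdio.h>
#include <stdint.h>

struct t2_like_rsp {
        uint8_t  word_count;
        uint16_t parm_offset;
        uint16_t data_offset;
        uint16_t parm_count;
        uint16_t data_count;
        uint16_t byte_count;
};

static int validate_rsp(const struct t2_like_rsp *rsp, unsigned int max_buf)
{
        unsigned int total;

        /* check for plausible word count */
        if (rsp->word_count < 10)
                goto err;

        /* check for parm and data offsets going beyond the end */
        if (rsp->parm_offset > 1024 || rsp->data_offset > 1024)
                goto err;

        /* byte count must cover parms + data and fit in the buffer */
        total = rsp->parm_count;
        if (total >= 512)
                goto err;

        total += rsp->data_count;
        if (total > rsp->byte_count || total >= max_buf)
                goto err;

        return 0;
err:
        return -1;      /* caller logs and treats this as -EINVAL */
}

int main(void)
{
        struct t2_like_rsp ok  = { 10, 64, 128, 100, 200, 400 };
        struct t2_like_rsp bad = { 10, 64, 128, 100, 400, 400 };

        printf("ok=%d bad=%d\n", validate_rsp(&ok, 4096), validate_rsp(&bad, 4096));
        return 0;
}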
int
CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
{
@@ -452,7 +450,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
- GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
/* even though we do not use raw we might as well set this
accurately, in case we ever find a need for it */
if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -566,7 +563,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
- GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
server->timeAdj *= 60;
@@ -706,6 +702,53 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
return rc;
}
+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches the request?

+ */
+static void
+cifs_echo_callback(struct mid_q_entry *mid)
+{
+ struct TCP_Server_Info *server = mid->callback_data;
+
+ DeleteMidQEntry(mid);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
+}
+
+int
+CIFSSMBEcho(struct TCP_Server_Info *server)
+{
+ ECHO_REQ *smb;
+ int rc = 0;
+
+ cFYI(1, "In echo request");
+
+ rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+ if (rc)
+ return rc;
+
+ /* set up echo request */
+ smb->hdr.Tid = cpu_to_le16(0xffff);
+ smb->hdr.WordCount = 1;
+ put_unaligned_le16(1, &smb->EchoCount);
+ put_bcc_le(1, &smb->hdr);
+ smb->Data[0] = 'a';
+ smb->hdr.smb_buf_length += 3;
+
+ rc = cifs_call_async(server, (struct smb_hdr *)smb,
+ cifs_echo_callback, server);
+ if (rc)
+ cFYI(1, "Echo request failed: %d", rc);
+
+ cifs_small_buf_release(smb);
+
+ return rc;
+}
+
int
CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
{
@@ -1193,7 +1236,7 @@ OldOpenRetry:
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ (struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
@@ -1306,7 +1349,7 @@ openRetry:
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ (struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
@@ -1388,7 +1431,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
- &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+ &resp_buf_type, CIFS_LOG_ERROR);
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
@@ -1663,7 +1706,8 @@ int
CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
- const __u32 numLock, const __u8 lockType, const bool waitFlag)
+ const __u32 numLock, const __u8 lockType,
+ const bool waitFlag, const __u8 oplock_level)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
@@ -1691,6 +1735,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->NumberOfLocks = cpu_to_le16(numLock);
pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
pSMB->LockType = lockType;
+ pSMB->OplockLevel = oplock_level;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
@@ -3087,7 +3132,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
- CIFS_STD_OP);
+ 0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
cFYI(1, "Send error in QuerySecDesc = %d", rc);
@@ -5562,7 +5607,7 @@ QAllEAsRetry:
}
/* make sure list_len doesn't go past end of SMB */
- end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9f59887badd2..18d3c7724d6e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -52,6 +52,9 @@
#define CIFS_PORT 445
#define RFC1001_PORT 139
+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
+
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
@@ -152,6 +155,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
+ cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
@@ -163,7 +167,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
}
spin_unlock(&cifs_tcp_ses_lock);
+
/* do not want to be sending data on a socket we are freeing */
+ cFYI(1, "%s: tearing down socket", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
@@ -180,22 +186,20 @@ cifs_reconnect(struct TCP_Server_Info *server)
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
+ server->lstrp = jiffies;
+ mutex_unlock(&server->srv_mutex);
+ /* mark submitted MIDs for retry and issue callbacks */
+ cFYI(1, "%s: issuing mid callbacks", __func__);
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct
- mid_q_entry,
- qhead);
- if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- /* Mark other intransit requests as needing
- retry so we do not immediately mark the
- session bad again (ie after we reconnect
- below) as they timeout too */
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ if (mid_entry->midState == MID_REQUEST_SUBMITTED)
mid_entry->midState = MID_RETRY_NEEDED;
- }
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
}
spin_unlock(&GlobalMid_Lock);
- mutex_unlock(&server->srv_mutex);
while ((server->tcpStatus != CifsExiting) &&
(server->tcpStatus != CifsGood)) {
@@ -212,10 +216,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood;
spin_unlock(&GlobalMid_Lock);
- /* atomic_set(&server->inFlight,0);*/
- wake_up(&server->response_q);
}
}
+
return rc;
}
@@ -229,9 +232,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
{
struct smb_t2_rsp *pSMBt;
- int total_data_size;
- int data_in_this_rsp;
int remaining;
+ __u16 total_data_size, data_in_this_rsp;
if (pSMB->Command != SMB_COM_TRANSACTION2)
return 0;
@@ -245,8 +247,8 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
pSMBt = (struct smb_t2_rsp *)pSMB;
- total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
- data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+ total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+ data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
remaining = total_data_size - data_in_this_rsp;
@@ -272,21 +274,18 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
{
struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
- int total_data_size;
- int total_in_buf;
- int remaining;
- int total_in_buf2;
char *data_area_of_target;
char *data_area_of_buf2;
- __u16 byte_count;
+ int remaining;
+ __u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
- total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
+ total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
- if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+ if (total_data_size !=
+ get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
cFYI(1, "total data size of primary and secondary t2 differ");
- }
- total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+ total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
remaining = total_data_size - total_in_buf;
@@ -296,28 +295,28 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
if (remaining == 0) /* nothing to do, ignore */
return 0;
- total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
+ total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
if (remaining < total_in_buf2) {
cFYI(1, "transact2 2nd response contains too much data");
}
/* find end of first SMB data area */
data_area_of_target = (char *)&pSMBt->hdr.Protocol +
- le16_to_cpu(pSMBt->t2_rsp.DataOffset);
+ get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
/* validate target area */
- data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
- le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+ data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
+ get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
data_area_of_target += total_in_buf;
/* copy second buffer into end of first buffer */
memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
total_in_buf += total_in_buf2;
- pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
- byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
+ put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+ byte_count = get_bcc_le(pTargetSMB);
byte_count += total_in_buf2;
- BCC_LE(pTargetSMB) = cpu_to_le16(byte_count);
+ put_bcc_le(byte_count, pTargetSMB);
byte_count = pTargetSMB->smb_buf_length;
byte_count += total_in_buf2;
@@ -331,7 +330,26 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
return 0; /* we are done */
} else /* more responses to go */
return 1;
+}
+
+static void
+cifs_echo_request(struct work_struct *work)
+{
+ int rc;
+ struct TCP_Server_Info *server = container_of(work,
+ struct TCP_Server_Info, echo.work);
+
+ /* no need to ping if we got a response recently */
+ if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+ goto requeue_echo;
+ rc = CIFSSMBEcho(server);
+ if (rc)
+ cFYI(1, "Unable to send echo request to server: %s",
+ server->hostname);
+
+requeue_echo:
+ queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}
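The worker's only decision is whether the server has been quiet long enough to warrant a ping; it requeues itself either way. A userspace sketch of that timing check (tick values invented):

#include <stdio.h>
#include <stdbool.h>

#define ECHO_INTERVAL   60      /* seconds, mirrors SMB_ECHO_INTERVAL/HZ */

static bool should_ping(unsigned long now, unsigned long last_response)
{
        /* no need to ping if we got a response recently */
        return now >= last_response + ECHO_INTERVAL - 1;
}

int main(void)
{
        unsigned long last_response = 1000;

        printf("t=1030: ping=%d\n", should_ping(1030, last_response)); /* 0 */
        printf("t=1059: ping=%d\n", should_ping(1059, last_response)); /* 1 */
        printf("t=1100: ping=%d\n", should_ping(1100, last_response)); /* 1 */
        return 0;
}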
static int
@@ -345,8 +363,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
struct msghdr smb_msg;
struct kvec iov;
struct socket *csocket = server->ssocket;
- struct list_head *tmp;
- struct cifsSesInfo *ses;
+ struct list_head *tmp, *tmp2;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
char temp;
@@ -399,7 +416,20 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
pdu_length = 4; /* enough to get RFC1001 header */
+
incomplete_rcv:
+ if (echo_retries > 0 &&
+ time_after(jiffies, server->lstrp +
+ (echo_retries * SMB_ECHO_INTERVAL))) {
+ cERROR(1, "Server %s has not responded in %d seconds. "
+ "Reconnecting...", server->hostname,
+ (echo_retries * SMB_ECHO_INTERVAL / HZ));
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ wake_up(&server->response_q);
+ continue;
+ }
+
length =
kernel_recvmsg(csocket, &smb_msg,
&iov, 1, pdu_length, 0 /* BB other flags? */);
@@ -559,10 +589,11 @@ incomplete_rcv:
continue;
}
+ mid_entry = NULL;
+ server->lstrp = jiffies;
- task_to_wake = NULL;
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if ((mid_entry->mid == smb_buffer->Mid) &&
@@ -603,20 +634,19 @@ incomplete_rcv:
mid_entry->resp_buf = smb_buffer;
mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
- task_to_wake = mid_entry->tsk;
mid_entry->midState = MID_RESPONSE_RECEIVED;
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
#ifdef CONFIG_CIFS_STATS2
mid_entry->when_received = jiffies;
#endif
- /* so we do not time out requests to server
- which is still responding (since server could
- be busy but not dead) */
- server->lstrp = jiffies;
break;
}
+ mid_entry = NULL;
}
spin_unlock(&GlobalMid_Lock);
- if (task_to_wake) {
+
+ if (mid_entry != NULL) {
/* Was previous buf put in mpx struct for multi-rsp? */
if (!isMultiRsp) {
/* smb buffer will be freed by user thread */
@@ -625,11 +655,10 @@ multi_t2_fnd:
else
smallbuf = NULL;
}
- wake_up_process(task_to_wake);
} else if (!is_valid_oplock_break(smb_buffer, server) &&
!isMultiRsp) {
cERROR(1, "No task to wake, unknown frame received! "
- "NumMids %d", midCount.counter);
+ "NumMids %d", atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
@@ -677,44 +706,16 @@ multi_t2_fnd:
if (smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf);
- /*
- * BB: we shouldn't have to do any of this. It shouldn't be
- * possible to exit from the thread with active SMB sessions
- */
- spin_lock(&cifs_tcp_ses_lock);
- if (list_empty(&server->pending_mid_q)) {
- /* loop through server session structures attached to this and
- mark them dead */
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo,
- smb_ses_list);
- ses->status = CifsExiting;
- ses->server = NULL;
- }
- spin_unlock(&cifs_tcp_ses_lock);
- } else {
- /* although we can not zero the server struct pointer yet,
- since there are active requests which may depnd on them,
- mark the corresponding SMB sessions as exiting too */
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo,
- smb_ses_list);
- ses->status = CifsExiting;
- }
-
+ if (!list_empty(&server->pending_mid_q)) {
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- cFYI(1, "Clearing Mid 0x%x - waking up ",
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Clearing Mid 0x%x - issuing callback",
mid_entry->mid);
- task_to_wake = mid_entry->tsk;
- if (task_to_wake)
- wake_up_process(task_to_wake);
- }
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
}
spin_unlock(&GlobalMid_Lock);
- spin_unlock(&cifs_tcp_ses_lock);
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
@@ -732,18 +733,6 @@ multi_t2_fnd:
coming home not much else we can do but free the memory */
}
- /* last chance to mark ses pointers invalid
- if there are any pointing to this (e.g
- if a crazy root user tried to kill cifsd
- kernel thread explicitly this might happen) */
- /* BB: This shouldn't be necessary, see above */
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
- ses->server = NULL;
- }
- spin_unlock(&cifs_tcp_ses_lock);
-
kfree(server->hostname);
task_to_wake = xchg(&server->tsk, NULL);
kfree(server);
@@ -1612,6 +1601,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ cancel_delayed_work_sync(&server->echo);
+
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
@@ -1701,8 +1692,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
+ tcp_ses->lstrp = jiffies;
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
/*
* at this point we are the only ones with the pointer
@@ -1751,6 +1744,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cifs_fscache_get_client_cookie(tcp_ses);
+ /* queue echo request delayed work */
+ queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+
return tcp_ses;
out_err_crypto_release:
@@ -2936,8 +2932,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
TCONX_RSP *pSMBr;
unsigned char *bcc_ptr;
int rc = 0;
- int length, bytes_left;
- __u16 count;
+ int length;
+ __u16 bytes_left, count;
if (ses == NULL)
return -EIO;
@@ -2965,7 +2961,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++; /* skip password */
/* already aligned so no need to do it below */
} else {
- pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
@@ -2983,7 +2979,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
#endif /* CIFS_WEAK_PW_HASH */
SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
- bcc_ptr += CIFS_SESS_KEY_SIZE;
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
@@ -3021,7 +3017,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
- CIFS_STD_OP);
+ 0);
/* above now done in SendReceive */
if ((rc == 0) && (tcon != NULL)) {
@@ -3031,7 +3027,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
- bytes_left = BCC(smb_buffer_response);
+ bytes_left = get_bcc(smb_buffer_response);
length = strnlen(bcc_ptr, bytes_left - 2);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d843631c028d..d7d65a70678e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -287,6 +287,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct inode *inode = cifs_file->dentry->d_inode;
struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsLockInfo *li, *tmp;
spin_lock(&cifs_file_list_lock);
@@ -302,6 +303,13 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
if (list_empty(&cifsi->openFileList)) {
cFYI(1, "closing last open instance for inode %p",
cifs_file->dentry->d_inode);
+
+ /* in strict cache mode we need to invalidate the mapping on the last
close because it may cause an error when we open this file
again and get at least a level II oplock */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ CIFS_I(inode)->invalid_mapping = true;
+
cifs_set_oplock_level(cifsi, 0);
}
spin_unlock(&cifs_file_list_lock);
@@ -726,12 +734,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
/* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
- 0, 1, lockType, 0 /* wait flag */ );
+ 0, 1, lockType, 0 /* wait flag */, 0);
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
- 0 /* wait flag */ );
+ 0 /* wait flag */, 0);
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
@@ -748,13 +756,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 0, 1,
lockType | LOCKING_ANDX_SHARED_LOCK,
- 0 /* wait flag */);
+ 0 /* wait flag */, 0);
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid,
length, pfLock->fl_start, 1, 0,
lockType |
LOCKING_ANDX_SHARED_LOCK,
- 0 /* wait flag */);
+ 0 /* wait flag */, 0);
pfLock->fl_type = F_RDLCK;
if (rc != 0)
cERROR(1, "Error unlocking "
@@ -797,8 +805,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
if (numLock) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
- pfLock->fl_start,
- 0, numLock, lockType, wait_flag);
+ pfLock->fl_start, 0, numLock, lockType,
+ wait_flag, 0);
if (rc == 0) {
/* For Windows locks we must store them. */
@@ -818,9 +826,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
(pfLock->fl_start + length) >=
(li->offset + li->length)) {
stored_rc = CIFSSMBLock(xid, tcon,
- netfid,
- li->length, li->offset,
- 1, 0, li->type, false);
+ netfid, li->length,
+ li->offset, 1, 0,
+ li->type, false, 0);
if (stored_rc)
rc = stored_rc;
else {
@@ -839,29 +847,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
return rc;
}
-/*
- * Set the timeout on write requests past EOF. For some servers (Windows)
- * these calls can be very long.
- *
- * If we're writing >10M past the EOF we give a 180s timeout. Anything less
- * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
- * The 10M cutoff is totally arbitrary. A better scheme for this would be
- * welcome if someone wants to suggest one.
- *
- * We may be able to do a better job with this if there were some way to
- * declare that a file should be sparse.
- */
-static int
-cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
-{
- if (offset <= cifsi->server_eof)
- return CIFS_STD_OP;
- else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
- return CIFS_VLONG_OP;
- else
- return CIFS_LONG_OP;
-}
-
/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -882,7 +867,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid, long_op;
+ int xid;
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
@@ -903,7 +888,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
xid = GetXid();
- long_op = cifs_write_timeout(cifsi, *poffset);
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
rc = -EAGAIN;
@@ -931,7 +915,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
min_t(const int, cifs_sb->wsize,
write_size - total_written),
*poffset, &bytes_written,
- NULL, write_data + total_written, long_op);
+ NULL, write_data + total_written, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -944,8 +928,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
cifs_update_eof(cifsi, *poffset, bytes_written);
*poffset += bytes_written;
}
- long_op = CIFS_STD_OP; /* subsequent writes fast -
- 15 seconds is plenty */
}
cifs_stats_bytes_written(pTcon, total_written);
@@ -974,7 +956,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid, long_op;
+ int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
@@ -987,7 +969,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
xid = GetXid();
- long_op = cifs_write_timeout(cifsi, *poffset);
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
rc = -EAGAIN;
@@ -1017,7 +998,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
rc = CIFSSMBWrite2(xid, pTcon,
open_file->netfid, len,
*poffset, &bytes_written,
- iov, 1, long_op);
+ iov, 1, 0);
} else
rc = CIFSSMBWrite(xid, pTcon,
open_file->netfid,
@@ -1025,7 +1006,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
write_size - total_written),
*poffset, &bytes_written,
write_data + total_written,
- NULL, long_op);
+ NULL, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -1038,8 +1019,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
cifs_update_eof(cifsi, *poffset, bytes_written);
*poffset += bytes_written;
}
- long_op = CIFS_STD_OP; /* subsequent writes fast -
- 15 seconds is plenty */
}
cifs_stats_bytes_written(pTcon, total_written);
@@ -1239,7 +1218,7 @@ static int cifs_writepages(struct address_space *mapping,
struct pagevec pvec;
int rc = 0;
int scanned = 0;
- int xid, long_op;
+ int xid;
cifs_sb = CIFS_SB(mapping->host->i_sb);
@@ -1377,43 +1356,67 @@ retry:
break;
}
if (n_iov) {
+retry_write:
open_file = find_writable_file(CIFS_I(mapping->host),
false);
if (!open_file) {
cERROR(1, "No writable handles for inode");
rc = -EBADF;
} else {
- long_op = cifs_write_timeout(cifsi, offset);
rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
bytes_to_write, offset,
&bytes_written, iov, n_iov,
- long_op);
+ 0);
cifsFileInfo_put(open_file);
- cifs_update_eof(cifsi, offset, bytes_written);
}
- if (rc || bytes_written < bytes_to_write) {
- cERROR(1, "Write2 ret %d, wrote %d",
- rc, bytes_written);
- mapping_set_error(mapping, rc);
- } else {
+ cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+
+ /*
+ * For now, treat a short write as if nothing got written.
+ * A zero-length write, however, indicates ENOSPC or EFBIG;
+ * we have no way to know which, so call it ENOSPC for now.
+ * EFBIG would get translated to AS_EIO anyway.
+ *
+ * FIXME: make it take into account the data that did
+ * get written
+ */
+ if (rc == 0) {
+ if (bytes_written == 0)
+ rc = -ENOSPC;
+ else if (bytes_written < bytes_to_write)
+ rc = -EAGAIN;
+ }
+
+ /* retry on data-integrity flush */
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+ goto retry_write;
+
+ /* fix the stats and EOF */
+ if (bytes_written > 0) {
cifs_stats_bytes_written(tcon, bytes_written);
+ cifs_update_eof(cifsi, offset, bytes_written);
}
for (i = 0; i < n_iov; i++) {
page = pvec.pages[first + i];
- /* Should we also set page error on
- success rc but too little data written? */
- /* BB investigate retry logic on temporary
- server crash cases and how recovery works
- when page marked as error */
- if (rc)
+ /* on retryable write error, redirty page */
+ if (rc == -EAGAIN)
+ redirty_page_for_writepage(wbc, page);
+ else if (rc != 0)
SetPageError(page);
kunmap(page);
unlock_page(page);
end_page_writeback(page);
page_cache_release(page);
}
+
+ if (rc != -EAGAIN)
+ mapping_set_error(mapping, rc);
+ else
+ rc = 0;
+
if ((wbc->nr_to_write -= n_iov) <= 0)
done = 1;
index = next;
@@ -1525,27 +1528,47 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
return rc;
}
-int cifs_fsync(struct file *file, int datasync)
+int cifs_strict_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
struct cifsTconInfo *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
xid = GetXid();
cFYI(1, "Sync file - name: %s datasync: 0x%x",
file->f_path.dentry->d_name.name, datasync);
- rc = filemap_write_and_wait(inode->i_mapping);
- if (rc == 0) {
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ if (!CIFS_I(inode)->clientCanCacheRead)
+ cifs_invalidate_mapping(inode);
- tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
- rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
- }
+ tcon = tlink_tcon(smbfile->tlink);
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+ rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
+
+ FreeXid(xid);
+ return rc;
+}
+
+int cifs_fsync(struct file *file, int datasync)
+{
+ int xid;
+ int rc = 0;
+ struct cifsTconInfo *tcon;
+ struct cifsFileInfo *smbfile = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+
+ xid = GetXid();
+
+ cFYI(1, "Sync file - name: %s datasync: 0x%x",
+ file->f_path.dentry->d_name.name, datasync);
+
+ tcon = tlink_tcon(smbfile->tlink);
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+ rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
FreeXid(xid);
return rc;
@@ -1596,42 +1619,42 @@ int cifs_flush(struct file *file, fl_owner_t id)
return rc;
}
-ssize_t cifs_user_read(struct file *file, char __user *read_data,
- size_t read_size, loff_t *poffset)
+static ssize_t
+cifs_iovec_read(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *poffset)
{
- int rc = -EACCES;
- unsigned int bytes_read = 0;
- unsigned int total_read = 0;
- unsigned int current_read_size;
+ int rc;
+ int xid;
+ unsigned int total_read, bytes_read = 0;
+ size_t len, cur_len;
+ int iov_offset = 0;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid;
struct cifsFileInfo *open_file;
- char *smb_read_data;
- char __user *current_offset;
struct smb_com_read_rsp *pSMBr;
+ char *read_data;
+
+ if (!nr_segs)
+ return 0;
+
+ len = iov_length(iov, nr_segs);
+ if (!len)
+ return 0;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- if (file->private_data == NULL) {
- rc = -EBADF;
- FreeXid(xid);
- return rc;
- }
open_file = file->private_data;
pTcon = tlink_tcon(open_file->tlink);
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
- for (total_read = 0, current_offset = read_data;
- read_size > total_read;
- total_read += bytes_read, current_offset += bytes_read) {
- current_read_size = min_t(const int, read_size - total_read,
- cifs_sb->rsize);
+ for (total_read = 0; total_read < len; total_read += bytes_read) {
+ cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
rc = -EAGAIN;
- smb_read_data = NULL;
+ read_data = NULL;
+
while (rc == -EAGAIN) {
int buf_type = CIFS_NO_BUFFER;
if (open_file->invalidHandle) {
@@ -1639,27 +1662,25 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
if (rc != 0)
break;
}
- rc = CIFSSMBRead(xid, pTcon,
- open_file->netfid,
- current_read_size, *poffset,
- &bytes_read, &smb_read_data,
- &buf_type);
- pSMBr = (struct smb_com_read_rsp *)smb_read_data;
- if (smb_read_data) {
- if (copy_to_user(current_offset,
- smb_read_data +
- 4 /* RFC1001 length field */ +
- le16_to_cpu(pSMBr->DataOffset),
- bytes_read))
+ rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
+ cur_len, *poffset, &bytes_read,
+ &read_data, &buf_type);
+ pSMBr = (struct smb_com_read_rsp *)read_data;
+ if (read_data) {
+ char *data_offset = read_data + 4 +
+ le16_to_cpu(pSMBr->DataOffset);
+ if (memcpy_toiovecend(iov, data_offset,
+ iov_offset, bytes_read))
rc = -EFAULT;
-
if (buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(smb_read_data);
+ cifs_small_buf_release(read_data);
else if (buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(smb_read_data);
- smb_read_data = NULL;
+ cifs_buf_release(read_data);
+ read_data = NULL;
+ iov_offset += bytes_read;
}
}
+
if (rc || (bytes_read == 0)) {
if (total_read) {
break;
@@ -1672,13 +1693,57 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
*poffset += bytes_read;
}
}
+
FreeXid(xid);
return total_read;
}
+ssize_t cifs_user_read(struct file *file, char __user *read_data,
+ size_t read_size, loff_t *poffset)
+{
+ struct iovec iov;
+ iov.iov_base = read_data;
+ iov.iov_len = read_size;
+
+ return cifs_iovec_read(file, &iov, 1, poffset);
+}
+
+static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t read;
+
+ read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
+ if (read > 0)
+ iocb->ki_pos = pos;
+
+ return read;
+}
+
+ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct inode *inode;
+
+ inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+ if (CIFS_I(inode)->clientCanCacheRead)
+ return generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+ /*
+ * In strict cache mode we need to read from the server all the time
+ * if we don't have a level II oplock, because the server can delay the
+ * mtime change and so we can't decide whether to invalidate the inode.
+ * Reading pages can also fail if there are mandatory locks on pages
+ * affected by this read but not on the region from pos to pos+len-1.
+ */
+
+ return cifs_user_readv(iocb, iov, nr_segs, pos);
+}
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
- loff_t *poffset)
+ loff_t *poffset)
{
int rc = -EACCES;
unsigned int bytes_read = 0;
@@ -1746,6 +1811,21 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return total_read;
}
+int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int rc, xid;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ xid = GetXid();
+
+ if (!CIFS_I(inode)->clientCanCacheRead)
+ cifs_invalidate_mapping(inode);
+
+ rc = generic_file_mmap(file, vma);
+ FreeXid(xid);
+ return rc;
+}
+
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
@@ -2192,7 +2272,8 @@ void cifs_oplock_break(struct work_struct *work)
*/
if (!cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
- 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
+ 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
+ cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, "Oplock release rc = %d", rc);
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6c9ee8014ff0..8852470b4fbb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -44,13 +44,17 @@ static void cifs_set_ops(struct inode *inode)
inode->i_fop = &cifs_file_direct_nobrl_ops;
else
inode->i_fop = &cifs_file_direct_ops;
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ inode->i_fop = &cifs_file_strict_nobrl_ops;
+ else
+ inode->i_fop = &cifs_file_strict_ops;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_nobrl_ops;
else { /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
}
-
/* check if server can support readpages */
if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
@@ -1679,7 +1683,7 @@ cifs_inode_needs_reval(struct inode *inode)
/*
* Zap the cache. Called when invalid_mapping flag is set.
*/
-static void
+void
cifs_invalidate_mapping(struct inode *inode)
{
int rc;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 43f10281bc19..a09e077ba925 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -571,7 +571,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pCifsInode = CIFS_I(netfile->dentry->d_inode);
cifs_set_oplock_level(pCifsInode,
- pSMB->OplockLevel);
+ pSMB->OplockLevel ? OPLOCK_READ : 0);
/*
* cifs_oplock_break_put() can't be called
* from here. Get reference after queueing
@@ -637,77 +637,6 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
return;
}
-/* Convert 16 bit Unicode pathname to wire format from string in current code
- page. Conversion may involve remapping up the seven characters that are
- only legal in POSIX-like OS (if they are present in the string). Path
- names are little endian 16 bit Unicode on the wire */
-int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
- const struct nls_table *cp, int mapChars)
-{
- int i, j, charlen;
- int len_remaining = maxlen;
- char src_char;
- __u16 temp;
-
- if (!mapChars)
- return cifs_strtoUCS(target, source, PATH_MAX, cp);
-
- for (i = 0, j = 0; i < maxlen; j++) {
- src_char = source[i];
- switch (src_char) {
- case 0:
- target[j] = 0;
- goto ctoUCS_out;
- case ':':
- target[j] = cpu_to_le16(UNI_COLON);
- break;
- case '*':
- target[j] = cpu_to_le16(UNI_ASTERIK);
- break;
- case '?':
- target[j] = cpu_to_le16(UNI_QUESTION);
- break;
- case '<':
- target[j] = cpu_to_le16(UNI_LESSTHAN);
- break;
- case '>':
- target[j] = cpu_to_le16(UNI_GRTRTHAN);
- break;
- case '|':
- target[j] = cpu_to_le16(UNI_PIPE);
- break;
- /* BB We can not handle remapping slash until
- all the calls to build_path_from_dentry
- are modified, as they use slash as separator BB */
- /* case '\\':
- target[j] = cpu_to_le16(UNI_SLASH);
- break;*/
- default:
- charlen = cp->char2uni(source+i,
- len_remaining, &temp);
- /* if no match, use question mark, which
- at least in some cases servers as wild card */
- if (charlen < 1) {
- target[j] = cpu_to_le16(0x003f);
- charlen = 1;
- } else
- target[j] = cpu_to_le16(temp);
- len_remaining -= charlen;
- /* character may take more than one byte in the
- the source string, but will take exactly two
- bytes in the target string */
- i += charlen;
- continue;
- }
- i++; /* move to next char in source string */
- len_remaining--;
- }
-
-ctoUCS_out:
- return i;
-}
-
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 6783ce6cdc89..8d9189f64477 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -916,14 +916,14 @@ unsigned int
smbCalcSize(struct smb_hdr *ptr)
{
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
- 2 /* size of the bcc field */ + BCC(ptr));
+ 2 /* size of the bcc field */ + get_bcc(ptr));
}
unsigned int
smbCalcSize_LE(struct smb_hdr *ptr)
{
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
- 2 /* size of the bcc field */ + le16_to_cpu(BCC_LE(ptr)));
+ 2 /* size of the bcc field */ + get_bcc_le(ptr));
}
/* The following are taken from fs/ntfs/util.c */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index eb746486e49e..1adc9625a344 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -277,7 +277,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
}
static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
int len;
@@ -323,7 +323,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
return;
}
-static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
@@ -575,12 +575,11 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
char *str_area;
SESSION_SETUP_ANDX *pSMB;
__u32 capabilities;
- int count;
+ __u16 count;
int resp_buf_type;
struct kvec iov[3];
enum securityEnum type;
- __u16 action;
- int bytes_remaining;
+ __u16 action, bytes_remaining;
struct key *spnego_key = NULL;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
u16 blob_len;
@@ -876,10 +875,10 @@ ssetup_ntlmssp_authenticate:
count = iov[1].iov_len + iov[2].iov_len;
smb_buf->smb_buf_length += count;
- BCC_LE(smb_buf) = cpu_to_le16(count);
+ put_bcc_le(count, smb_buf);
rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
- CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+ CIFS_LOG_ERROR);
/* SMB request buf freed in SendReceive2 */
pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
@@ -910,7 +909,7 @@ ssetup_ntlmssp_authenticate:
cFYI(1, "UID = %d ", ses->Suid);
/* response can have either 3 or 4 word count - Samba sends 3 */
/* and lanman response is 3 */
- bytes_remaining = BCC(smb_buf);
+ bytes_remaining = get_bcc(smb_buf);
bcc_ptr = pByteArea(smb_buf);
if (smb_buf->WordCount == 4) {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 59ca81b16919..c1ccca1a933f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -36,7 +36,13 @@
extern mempool_t *cifs_mid_poolp;
-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+ wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
struct mid_q_entry *temp;
@@ -58,28 +64,28 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
- temp->tsk = current;
+
+ /*
+ * The default is for the mid to be synchronous, so the
+ * default callback just wakes up the current task.
+ */
+ temp->callback = wake_up_task;
+ temp->callback_data = current;
}
- spin_lock(&GlobalMid_Lock);
- list_add_tail(&temp->qhead, &server->pending_mid_q);
atomic_inc(&midCount);
temp->midState = MID_REQUEST_ALLOCATED;
- spin_unlock(&GlobalMid_Lock);
return temp;
}
-static void
+void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
unsigned long now;
#endif
- spin_lock(&GlobalMid_Lock);
midEntry->midState = MID_FREE;
- list_del(&midEntry->qhead);
atomic_dec(&midCount);
- spin_unlock(&GlobalMid_Lock);
if (midEntry->largeBuf)
cifs_buf_release(midEntry->resp_buf);
else
@@ -103,6 +109,16 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
mempool_free(midEntry, cifs_mid_poolp);
}
+static void
+delete_mid(struct mid_q_entry *mid)
+{
+ spin_lock(&GlobalMid_Lock);
+ list_del(&mid->qhead);
+ spin_unlock(&GlobalMid_Lock);
+
+ DeleteMidQEntry(mid);
+}
+
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
@@ -244,31 +260,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
return smb_sendv(server, &iov, 1);
}
-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+ const int long_op)
{
if (long_op == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
- atomic_inc(&ses->server->inFlight);
+ atomic_inc(&server->inFlight);
return 0;
}
spin_lock(&GlobalMid_Lock);
while (1) {
- if (atomic_read(&ses->server->inFlight) >=
- cifs_max_pending){
+ if (atomic_read(&server->inFlight) >= cifs_max_pending) {
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
- atomic_inc(&ses->server->num_waiters);
+ atomic_inc(&server->num_waiters);
#endif
- wait_event(ses->server->request_q,
- atomic_read(&ses->server->inFlight)
+ wait_event(server->request_q,
+ atomic_read(&server->inFlight)
< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
- atomic_dec(&ses->server->num_waiters);
+ atomic_dec(&server->num_waiters);
#endif
spin_lock(&GlobalMid_Lock);
} else {
- if (ses->server->tcpStatus == CifsExiting) {
+ if (server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
return -ENOENT;
}
@@ -278,7 +294,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
/* update # of requests on the wire to server */
if (long_op != CIFS_BLOCKING_OP)
- atomic_inc(&ses->server->inFlight);
+ atomic_inc(&server->inFlight);
spin_unlock(&GlobalMid_Lock);
break;
}
@@ -308,53 +324,81 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
*ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL)
return -ENOMEM;
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
return 0;
}
-static int wait_for_response(struct cifsSesInfo *ses,
- struct mid_q_entry *midQ,
- unsigned long timeout,
- unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
- unsigned long curr_timeout;
+ int error;
- for (;;) {
- curr_timeout = timeout + jiffies;
- wait_event_timeout(ses->server->response_q,
- midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+ error = wait_event_killable(server->response_q,
+ midQ->midState != MID_REQUEST_SUBMITTED);
+ if (error < 0)
+ return -ERESTARTSYS;
- if (time_after(jiffies, curr_timeout) &&
- (midQ->midState == MID_REQUEST_SUBMITTED) &&
- ((ses->server->tcpStatus == CifsGood) ||
- (ses->server->tcpStatus == CifsNew))) {
+ return 0;
+}
- unsigned long lrt;
- /* We timed out. Is the server still
- sending replies ? */
- spin_lock(&GlobalMid_Lock);
- lrt = ses->server->lstrp;
- spin_unlock(&GlobalMid_Lock);
+/*
+ * Send a SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ mid_callback_t *callback, void *cbdata)
+{
+ int rc;
+ struct mid_q_entry *mid;
- /* Calculate time_to_wait past last receive time.
- Although we prefer not to time out if the
- server is still responding - we will time
- out if the server takes more than 15 (or 45
- or 180) seconds to respond to this request
- and has not responded to any request from
- other threads on the client within 10 seconds */
- lrt += time_to_wait;
- if (time_after(jiffies, lrt)) {
- /* No replies for time_to_wait. */
- cERROR(1, "server not responding");
- return -1;
- }
- } else {
- return 0;
- }
+ rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+ if (rc)
+ return rc;
+
+ mutex_lock(&server->srv_mutex);
+ mid = AllocMidQEntry(in_buf, server);
+ if (mid == NULL) {
+ mutex_unlock(&server->srv_mutex);
+ return -ENOMEM;
}
-}
+ /* put it on the pending_mid_q */
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&mid->qhead, &server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ goto out_err;
+ }
+
+ mid->callback = callback;
+ mid->callback_data = cbdata;
+ mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+ atomic_inc(&server->inSend);
+#endif
+ rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+ atomic_dec(&server->inSend);
+ mid->when_sent = jiffies;
+#endif
+ mutex_unlock(&server->srv_mutex);
+ if (rc)
+ goto out_err;
+
+ return rc;
+out_err:
+ delete_mid(mid);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
+ return rc;
+}
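A userspace sketch of the contract cifs_call_async() establishes: the submitter registers a callback and opaque data for an id, and the receive path unlinks the matching entry and invokes the callback. Names and the fixed-size table below are hypothetical:

#include <stdio.h>
#include <stddef.h>

typedef void (done_cb_t)(int id, void *data);

struct pending {
        int id;
        done_cb_t *cb;
        void *data;
        int in_use;
};

static struct pending table[4];

static int submit(int id, done_cb_t *cb, void *data)
{
        for (size_t i = 0; i < 4; i++) {
                if (!table[i].in_use) {
                        table[i] = (struct pending){ id, cb, data, 1 };
                        return 0;
                }
        }
        return -1;      /* queue full */
}

static void dispatch(int id)
{
        for (size_t i = 0; i < 4; i++) {
                if (table[i].in_use && table[i].id == id) {
                        table[i].in_use = 0;    /* unlink before calling back */
                        table[i].cb(id, table[i].data);
                        return;
                }
        }
        printf("no pending request for id %d\n", id);
}

static void echo_done(int id, void *data)
{
        printf("echo reply for id %d (%s)\n", id, (const char *)data);
}

int main(void)
{
        submit(7, echo_done, "server-a");
        dispatch(7);    /* reply arrives */
        dispatch(7);    /* duplicate reply: nothing pending */
        return 0;
}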
/*
*
@@ -382,6 +426,81 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+ int rc = 0;
+
+ cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+ mid->mid, mid->midState);
+
+ spin_lock(&GlobalMid_Lock);
+ /* ensure that it's no longer on the pending_mid_q */
+ list_del_init(&mid->qhead);
+
+ switch (mid->midState) {
+ case MID_RESPONSE_RECEIVED:
+ spin_unlock(&GlobalMid_Lock);
+ return rc;
+ case MID_REQUEST_SUBMITTED:
+ /* socket is going down, reject all calls */
+ if (server->tcpStatus == CifsExiting) {
+ cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+ __func__, mid->mid, mid->command, mid->midState);
+ rc = -EHOSTDOWN;
+ break;
+ }
+ case MID_RETRY_NEEDED:
+ rc = -EAGAIN;
+ break;
+ default:
+ cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+ mid->mid, mid->midState);
+ rc = -EIO;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ DeleteMidQEntry(mid);
+ return rc;
+}
+
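A compact sketch of the state-to-errno mapping sync_mid_result() performs once the waiter stops sleeping (the enum and return values below are stand-ins):

#include <stdio.h>
#include <errno.h>

enum mid_state { RESPONSE_RECEIVED, REQUEST_SUBMITTED, RETRY_NEEDED, BOGUS };

static int mid_result(enum mid_state state, int tcp_exiting)
{
        switch (state) {
        case RESPONSE_RECEIVED:
                return 0;
        case REQUEST_SUBMITTED:
                if (tcp_exiting)
                        return -EHOSTDOWN;      /* socket is going down */
                /* fall through: treat as retryable */
        case RETRY_NEEDED:
                return -EAGAIN;
        default:
                return -EIO;                    /* invalid mid state */
        }
}

int main(void)
{
        printf("%d %d %d %d\n",
               mid_result(RESPONSE_RECEIVED, 0),
               mid_result(REQUEST_SUBMITTED, 1),
               mid_result(REQUEST_SUBMITTED, 0),
               mid_result(BOGUS, 0));
        return 0;
}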
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ struct mid_q_entry *mid)
+{
+ int rc = 0;
+
+ /* -4 for RFC1001 length and +2 for BCC field */
+ in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2;
+ in_buf->Command = SMB_COM_NT_CANCEL;
+ in_buf->WordCount = 0;
+ put_bcc_le(0, in_buf);
+
+ mutex_lock(&server->srv_mutex);
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
+ rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+ mutex_unlock(&server->srv_mutex);
+
+ cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+ in_buf->Mid, rc);
+
+ return rc;
+}
+
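A userspace sketch of the header mangling send_nt_cancel() describes: keep the Mid, switch the command, zero the word and byte counts, and shrink the length accordingly. The struct layout and sizes below are made up for the example:

#include <stdio.h>
#include <stdint.h>

#define CMD_NT_CANCEL 0xA4

struct hdr {
        uint32_t buf_length;    /* RFC1001-style length of what follows */
        uint8_t  command;
        uint16_t mid;
        uint8_t  word_count;
        uint16_t byte_count;
};

static void make_cancel(struct hdr *h)
{
        /* everything after the length field: fixed header fields plus the
           now-empty byte count (no parameter words, no data) */
        h->buf_length = sizeof(*h) - sizeof(h->buf_length);
        h->command = CMD_NT_CANCEL;
        h->word_count = 0;
        h->byte_count = 0;      /* wire format would be little-endian */
}

int main(void)
{
        struct hdr req = { 100, 0x2E /* read */, 42, 12, 60 };

        make_cancel(&req);
        printf("cancel: cmd=0x%X mid=%u len=%u\n",
               (unsigned)req.command, (unsigned)req.mid,
               (unsigned)req.buf_length);
        return 0;
}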
int
SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +509,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
int rc = 0;
int long_op;
unsigned int receive_len;
- unsigned long timeout;
struct mid_q_entry *midQ;
struct smb_hdr *in_buf = iov[0].iov_base;
@@ -413,7 +531,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
to the same server. We may make this configurable later or
use ses->maxReq */
- rc = wait_for_free_request(ses, long_op);
+ rc = wait_for_free_request(ses->server, long_op);
if (rc) {
cifs_small_buf_release(in_buf);
return rc;
@@ -457,65 +575,20 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (rc < 0)
goto out;
- if (long_op == CIFS_STD_OP)
- timeout = 15 * HZ;
- else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
- timeout = 180 * HZ;
- else if (long_op == CIFS_LONG_OP)
- timeout = 45 * HZ; /* should be greater than
- servers oplock break timeout (about 43 seconds) */
- else if (long_op == CIFS_ASYNC_OP)
+ if (long_op == CIFS_ASYNC_OP)
goto out;
- else if (long_op == CIFS_BLOCKING_OP)
- timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
- else {
- cERROR(1, "unknown timeout flag %d", long_op);
- rc = -EIO;
- goto out;
- }
-
- /* wait for 15 seconds or until woken up due to response arriving or
- due to last connection to this server being unmounted */
- if (signal_pending(current)) {
- /* if signal pending do not hold up user for full smb timeout
- but we still give response a chance to complete */
- timeout = 2 * HZ;
- }
-
- /* No user interrupts in wait - wreaks havoc with performance */
- wait_for_response(ses, midQ, timeout, 10 * HZ);
-
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf == NULL) {
- cERROR(1, "No response to cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
+ rc = wait_for_response(ses->server, midQ);
+ if (rc != 0)
+ goto out;
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
- /* Update # of requests on wire to server */
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -559,19 +632,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
- BCC(midQ->resp_buf) =
- le16_to_cpu(BCC_LE(midQ->resp_buf));
+ put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
if ((flags & CIFS_NO_RESP) == 0)
midQ->resp_buf = NULL; /* mark it so buf will
not be freed by
- DeleteMidQEntry */
+ delete_mid */
} else {
rc = -EIO;
cFYI(1, "Bad MID state?");
}
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
@@ -585,7 +657,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
{
int rc = 0;
unsigned int receive_len;
- unsigned long timeout;
struct mid_q_entry *midQ;
if (ses == NULL) {
@@ -610,7 +681,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
return -EIO;
}
- rc = wait_for_free_request(ses, long_op);
+ rc = wait_for_free_request(ses->server, long_op);
if (rc)
return rc;
@@ -649,64 +720,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (rc < 0)
goto out;
- if (long_op == CIFS_STD_OP)
- timeout = 15 * HZ;
- /* wait for 15 seconds or until woken up due to response arriving or
- due to last connection to this server being unmounted */
- else if (long_op == CIFS_ASYNC_OP)
+ if (long_op == CIFS_ASYNC_OP)
goto out;
- else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
- timeout = 180 * HZ;
- else if (long_op == CIFS_LONG_OP)
- timeout = 45 * HZ; /* should be greater than
- servers oplock break timeout (about 43 seconds) */
- else if (long_op == CIFS_BLOCKING_OP)
- timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
- else {
- cERROR(1, "unknown timeout flag %d", long_op);
- rc = -EIO;
- goto out;
- }
- if (signal_pending(current)) {
- /* if signal pending do not hold up user for full smb timeout
- but we still give response a chance to complete */
- timeout = 2 * HZ;
- }
-
- /* No user interrupts in wait - wreaks havoc with performance */
- wait_for_response(ses, midQ, timeout, 10 * HZ);
-
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf == NULL) {
- cERROR(1, "No response for cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
+ rc = wait_for_response(ses->server, midQ);
+ if (rc != 0)
+ goto out;
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
- /* Update # of requests on wire to server */
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -748,43 +775,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * out_buf->WordCount) + 2 /* bcc */ )
- BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+ put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
} else {
rc = -EIO;
cERROR(1, "Bad MID state?");
}
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
- struct mid_q_entry *midQ)
-{
- int rc = 0;
- struct cifsSesInfo *ses = tcon->ses;
- __u16 mid = in_buf->Mid;
-
- header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
- in_buf->Mid = mid;
- mutex_lock(&ses->server->srv_mutex);
- rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
- if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
- return rc;
- }
- rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
- mutex_unlock(&ses->server->srv_mutex);
- return rc;
-}
-
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
blocking lock to return. */
@@ -807,7 +811,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
pSMB->hdr.Mid = GetNextMid(ses->server);
return SendReceive(xid, ses, in_buf, out_buf,
- &bytes_returned, CIFS_STD_OP);
+ &bytes_returned, 0);
}
int
@@ -845,7 +849,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
return -EIO;
}
- rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+ rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
if (rc)
return rc;
@@ -863,7 +867,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
@@ -880,7 +884,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
@@ -899,10 +903,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
-
- rc = send_nt_cancel(tcon, in_buf, midQ);
+ rc = send_nt_cancel(ses->server, in_buf, midQ);
if (rc) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
} else {
@@ -914,47 +917,22 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
/* If we get -ENOLCK back the lock may have
already been removed. Don't exit in this case. */
if (rc && rc != -ENOLCK) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
}
- /* Wait 5 seconds for the response. */
- if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
+ if (wait_for_response(ses->server, midQ) == 0) {
/* We got the response - restart system call. */
rstart = 1;
}
}
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf) {
- spin_unlock(&GlobalMid_Lock);
- receive_len = midQ->resp_buf->smb_buf_length;
- } else {
- cERROR(1, "No response for cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
-
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0)
return rc;
- }
+ receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, "Frame too large received. Length: %d Xid: %d",
receive_len, xid);
@@ -998,10 +976,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * out_buf->WordCount) + 2 /* bcc */ )
- BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+ put_bcc(get_bcc_le(out_buf), out_buf);
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
if (rstart && rc == -EACCES)
return -ERESTARTSYS;
return rc;
diff --git a/fs/dcache.c b/fs/dcache.c
index 9f493ee4dcba..2a6bd9a4ae97 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -176,6 +176,7 @@ static void d_free(struct dentry *dentry)
/**
* dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * @dentry: the target dentry
* After this call, in-progress rcu-walk path lookup will fail. This
* should be called after unhashing, and after changing d_inode (if
* the dentry has not already been unhashed).
@@ -281,6 +282,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
/**
* d_kill - kill dentry and return parent
* @dentry: dentry to kill
+ * @parent: parent dentry
*
* The dentry must already be unhashed and removed from the LRU.
*
@@ -1973,7 +1975,7 @@ out:
/**
* d_validate - verify dentry provided from insecure source (deprecated)
* @dentry: The dentry alleged to be valid child of @dparent
- * @parent: The parent dentry (known to be valid)
+ * @dparent: The parent dentry (known to be valid)
*
* An insecure source has sent us a dentry, here we verify it and dget() it.
* This is used by ncpfs in its readdir implementation.
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 85882f6ba5f7..b044705eedd4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,12 +325,16 @@ void dio_end_io(struct bio *bio, int error)
}
EXPORT_SYMBOL_GPL(dio_end_io);
-static int
+static void
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
sector_t first_sector, int nr_vecs)
{
struct bio *bio;
+ /*
+ * bio_alloc() is guaranteed to return a bio when called with
+ * __GFP_WAIT and we request a valid number of vectors.
+ */
bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev;
@@ -342,7 +346,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
dio->bio = bio;
dio->logical_offset_in_bio = dio->cur_page_fs_offset;
- return 0;
}
/*
@@ -583,8 +586,9 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector)
goto out;
sector = start_sector << (dio->blkbits - 9);
nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+ nr_pages = min(nr_pages, BIO_MAX_PAGES);
BUG_ON(nr_pages <= 0);
- ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+ dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
dio->boundary = 0;
out:
return ret;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7aa767d4f06f..85c8cc8f2473 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -754,7 +754,7 @@ static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
- char *path);
+ struct path *path);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -2877,27 +2877,20 @@ static int ext3_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
if (!test_opt(sb, QUOTA))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
-
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
- path_put(&path);
+ if (path->mnt->mnt_sb != sb)
return -EXDEV;
- }
/* Journaling quota? */
if (EXT3_SB(sb)->s_qf_names[type]) {
/* Quotafile not of fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
ext3_msg(sb, KERN_WARNING,
"warning: Quota file not on filesystem root. "
"Journaled quota will not work.");
@@ -2907,7 +2900,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
* When we journal data on quota file, we have to flush journal to see
* all updates to the file when we bypass pagecache...
*/
- if (ext3_should_journal_data(path.dentry->d_inode)) {
+ if (ext3_should_journal_data(path->dentry->d_inode)) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
@@ -2915,15 +2908,11 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
journal_lock_updates(EXT3_SB(sb)->s_journal);
err = journal_flush(EXT3_SB(sb)->s_journal);
journal_unlock_updates(EXT3_SB(sb)->s_journal);
- if (err) {
- path_put(&path);
+ if (err)
return err;
- }
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- return err;
+ return dquot_quota_on(sb, type, format_id, path);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cb10a06775e4..48ce561fafac 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1161,7 +1161,7 @@ static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *path);
+ struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -4558,27 +4558,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
if (!test_opt(sb, QUOTA))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
-
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
- path_put(&path);
+ if (path->mnt->mnt_sb != sb)
return -EXDEV;
- }
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
/* Quotafile not in fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
ext4_msg(sb, KERN_WARNING,
"Quota file not on filesystem root. "
"Journaled quota will not work");
@@ -4589,7 +4582,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
* all updates to the file when we bypass pagecache...
*/
if (EXT4_SB(sb)->s_journal &&
- ext4_should_journal_data(path.dentry->d_inode)) {
+ ext4_should_journal_data(path->dentry->d_inode)) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
@@ -4597,15 +4590,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
- if (err) {
- path_put(&path);
+ if (err)
return err;
- }
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- return err;
+ return dquot_quota_on(sb, type, format_id, path);
}
static int ext4_quota_off(struct super_block *sb, int type)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2232b3c780bd..7aa7d4f8984a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -74,16 +74,14 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
}
/**
- * GFS2 lookup code fills in vfs inode contents based on info obtained
- * from directory entry inside gfs2_inode_lookup(). This has caused issues
- * with NFS code path since its get_dentry routine doesn't have the relevant
- * directory entry when gfs2_inode_lookup() is invoked. Part of the code
- * segment inside gfs2_inode_lookup code needs to get moved around.
+ * gfs2_set_iop - Sets inode operations
+ * @inode: The inode with correct i_mode filled in
*
- * Clears I_NEW as well.
- **/
+ * GFS2 lookup code fills in vfs inode contents based on info obtained
+ * from directory entry inside gfs2_inode_lookup().
+ */
-void gfs2_set_iop(struct inode *inode)
+static void gfs2_set_iop(struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(inode);
umode_t mode = inode->i_mode;
@@ -106,8 +104,6 @@ void gfs2_set_iop(struct inode *inode)
inode->i_op = &gfs2_file_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
-
- unlock_new_inode(inode);
}
/**
@@ -119,10 +115,8 @@ void gfs2_set_iop(struct inode *inode)
* Returns: A VFS inode, or an error
*/
-struct inode *gfs2_inode_lookup(struct super_block *sb,
- unsigned int type,
- u64 no_addr,
- u64 no_formal_ino)
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ u64 no_addr, u64 no_formal_ino)
{
struct inode *inode;
struct gfs2_inode *ip;
@@ -152,51 +146,37 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
goto fail_iopen;
- ip->i_iopen_gh.gh_gl->gl_object = ip;
+ ip->i_iopen_gh.gh_gl->gl_object = ip;
gfs2_glock_put(io_gl);
io_gl = NULL;
- if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
- goto gfs2_nfsbypass;
-
- inode->i_mode = DT2IF(type);
-
- /*
- * We must read the inode in order to work out its type in
- * this case. Note that this doesn't happen often as we normally
- * know the type beforehand. This code path only occurs during
- * unlinked inode recovery (where it is safe to do this glock,
- * which is not true in the general case).
- */
if (type == DT_UNKNOWN) {
- struct gfs2_holder gh;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- if (unlikely(error))
- goto fail_glock;
- /* Inode is now uptodate */
- gfs2_glock_dq_uninit(&gh);
+ /* Inode glock must be locked already */
+ error = gfs2_inode_refresh(GFS2_I(inode));
+ if (error)
+ goto fail_refresh;
+ } else {
+ inode->i_mode = DT2IF(type);
}
gfs2_set_iop(inode);
+ unlock_new_inode(inode);
}
-gfs2_nfsbypass:
return inode;
-fail_glock:
- gfs2_glock_dq(&ip->i_iopen_gh);
+
+fail_refresh:
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_iopen:
if (io_gl)
gfs2_glock_put(io_gl);
fail_put:
- if (inode->i_state & I_NEW)
- ip->i_gl->gl_object = NULL;
+ ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
fail:
- if (inode->i_state & I_NEW)
- iget_failed(inode);
- else
- iput(inode);
+ iget_failed(inode);
return ERR_PTR(error);
}
@@ -221,14 +201,6 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
if (IS_ERR(inode))
goto fail;
- error = gfs2_inode_refresh(GFS2_I(inode));
- if (error)
- goto fail_iput;
-
- /* Pick up the works we bypass in gfs2_inode_lookup */
- if (inode->i_state & I_NEW)
- gfs2_set_iop(inode);
-
/* Two extra checks for NFS only */
if (no_formal_ino) {
error = -ESTALE;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 732a183efdb3..3e00a66e7cbd 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -96,7 +96,6 @@ err:
return -EIO;
}
-extern void gfs2_set_iop(struct inode *inode);
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 16c2ecac7eb7..ec73ed70bae1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1336,6 +1336,7 @@ static void gfs2_evict_inode(struct inode *inode)
if (error)
goto out_truncate;
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
error = gfs2_glock_nq(&ip->i_iopen_gh);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 06d1f749ca89..38f986d2447e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -993,8 +993,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
/* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
- char *path)
+static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
{
unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
@@ -1013,7 +1012,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type)
}
static const struct quotactl_ops ocfs2_quotactl_ops = {
- .quota_on = ocfs2_quota_on,
+ .quota_on_meta = ocfs2_quota_on,
.quota_off = ocfs2_quota_off,
.quota_sync = dquot_quota_sync,
.get_info = dquot_get_dqinfo,
diff --git a/fs/pipe.c b/fs/pipe.c
index 89e9e19b1b2e..da42f7db50de 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@ redo:
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
@@ -450,7 +450,7 @@ redo:
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -612,7 +612,7 @@ redo2:
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
@@ -623,7 +623,7 @@ redo2:
out:
mutex_unlock(&inode->i_mutex);
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0)
@@ -715,7 +715,7 @@ pipe_release(struct inode *inode, int decr, int decw)
if (!pipe->readers && !pipe->writers) {
free_pipe_info(inode);
} else {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 6a0068841d96..15af6222f8a4 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -1,5 +1,5 @@
config PROC_FS
- bool "/proc file system support" if EMBEDDED
+ bool "/proc file system support" if EXPERT
default y
help
This is a virtual file system providing information about the status
@@ -40,7 +40,7 @@ config PROC_VMCORE
Exports the dump image of crashed kernel in ELF format.
config PROC_SYSCTL
- bool "Sysctl support (/proc/sys)" if EMBEDDED
+ bool "Sysctl support (/proc/sys)" if EXPERT
depends on PROC_FS
select SYSCTL
default y
@@ -61,7 +61,7 @@ config PROC_SYSCTL
config PROC_PAGE_MONITOR
default y
depends on PROC_FS && MMU
- bool "Enable /proc page monitoring" if EMBEDDED
+ bool "Enable /proc page monitoring" if EXPERT
help
Various /proc files exist to monitor process memory utilization:
/proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 84becd3e4772..a2a622e079f0 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2189,8 +2189,8 @@ int dquot_resume(struct super_block *sb, int type)
}
EXPORT_SYMBOL(dquot_resume);
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
- struct path *path)
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+ struct path *path)
{
int error = security_quota_on(path->dentry);
if (error)
@@ -2204,20 +2204,6 @@ int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
DQUOT_LIMITS_ENABLED);
return error;
}
-EXPORT_SYMBOL(dquot_quota_on_path);
-
-int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
-{
- struct path path;
- int error;
-
- error = kern_path(name, LOOKUP_FOLLOW, &path);
- if (!error) {
- error = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- }
- return error;
-}
EXPORT_SYMBOL(dquot_quota_on);
/*
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b299961e1edb..b34bdb25490c 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -64,18 +64,15 @@ static int quota_sync_all(int type)
}
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
- void __user *addr)
+ struct path *path)
{
- char *pathname;
- int ret = -ENOSYS;
-
- pathname = getname(addr);
- if (IS_ERR(pathname))
- return PTR_ERR(pathname);
- if (sb->s_qcop->quota_on)
- ret = sb->s_qcop->quota_on(sb, type, id, pathname);
- putname(pathname);
- return ret;
+ if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_on_meta)
+ return sb->s_qcop->quota_on_meta(sb, type, id);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ return sb->s_qcop->quota_on(sb, type, id, path);
}
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
@@ -241,7 +238,7 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
- void __user *addr)
+ void __user *addr, struct path *path)
{
int ret;
@@ -256,7 +253,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
switch (cmd) {
case Q_QUOTAON:
- return quota_quotaon(sb, type, cmd, id, addr);
+ return quota_quotaon(sb, type, cmd, id, path);
case Q_QUOTAOFF:
if (!sb->s_qcop->quota_off)
return -ENOSYS;
@@ -335,6 +332,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
{
uint cmds, type;
struct super_block *sb = NULL;
+ struct path path, *pathp = NULL;
int ret;
cmds = cmd >> SUBCMDSHIFT;
@@ -351,12 +349,27 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
return -ENODEV;
}
+ /*
+ * Path for quotaon has to be resolved before grabbing superblock
+ * because that gets s_umount sem which is also possibly needed by path
+ * resolution (think about autofs) and thus deadlocks could arise.
+ */
+ if (cmds == Q_QUOTAON) {
+ ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+ if (ret)
+ pathp = ERR_PTR(ret);
+ else
+ pathp = &path;
+ }
+
sb = quotactl_block(special);
if (IS_ERR(sb))
return PTR_ERR(sb);
- ret = do_quotactl(sb, type, cmds, id, addr);
+ ret = do_quotactl(sb, type, cmds, id, addr, pathp);
drop_super(sb);
+ if (pathp && !IS_ERR(pathp))
+ path_put(pathp);
return ret;
}
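Since ->quota_on() now receives a pre-resolved struct path (looked up before the superblock is pinned, per the deadlock note above), a filesystem that layers on the generic dquot code reduces to something like the following minimal sketch; foo_quota_on() is a hypothetical example mirroring the ext3/ext4 conversions in this patch:

static int foo_quota_on(struct super_block *sb, int type, int format_id,
			struct path *path)
{
	/* The quota file must live on the filesystem being configured. */
	if (path->mnt->mnt_sb != sb)
		return -EXDEV;

	/* No kern_path()/path_put() here any more; the caller owns the path. */
	return dquot_quota_on(sb, type, format_id, path);
}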
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2575682a9ead..0aab04f46827 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -632,7 +632,7 @@ static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
static int reiserfs_mark_dquot_dirty(struct dquot *);
static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, struct path *);
static const struct dquot_operations reiserfs_quota_operations = {
.write_dquot = reiserfs_write_dquot,
@@ -2048,25 +2048,21 @@ static int reiserfs_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
struct inode *inode;
struct reiserfs_transaction_handle th;
if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
+ if (path->mnt->mnt_sb != sb) {
err = -EXDEV;
goto out;
}
- inode = path.dentry->d_inode;
+ inode = path->dentry->d_inode;
/* We must not pack tails for quota files on reiserfs for quota IO to work */
if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
err = reiserfs_unpack(inode, NULL);
@@ -2082,7 +2078,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
/* Journaling quota? */
if (REISERFS_SB(sb)->s_qf_names[type]) {
/* Quotafile not of fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
reiserfs_warning(sb, "super-6521",
"Quota file not on filesystem root. "
"Journalled quota will not work.");
@@ -2101,9 +2097,8 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
if (err)
goto out;
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
+ err = dquot_quota_on(sb, type, format_id, path);
out:
- path_put(&path);
return err;
}
diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig
index f4b67588b9d6..8c41feacbac5 100644
--- a/fs/sysfs/Kconfig
+++ b/fs/sysfs/Kconfig
@@ -1,5 +1,5 @@
config SYSFS
- bool "sysfs file system support" if EMBEDDED
+ bool "sysfs file system support" if EXPERT
default y
help
The sysfs filesystem is a virtual filesystem that the kernel uses to
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17714beb868e..5b6c391efc8e 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 9cf736ea4691..fc1575fd4596 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index bc4a6deb73b0..ef1cef77d32b 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a091cabca4b1..de39915f6b7f 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 65b3f5888f42..a3252a5ead66 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 241b8a04c83c..e46ec95a8ada 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20101209
+#define ACPI_CA_VERSION 0x20110112
#include "actypes.h"
#include "actbl.h"
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index e5526354ba5e..0a66cc45dd6b 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index ad2001683ba7..7e42bfee0e29 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index cd77aa75c962..7504bc99b29b 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index d4136b28011f..0fc15dfb2e22 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 939a431a6ab6..64f838beaabf 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index a3e334ab1119..5af3ed52ef98 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 5dcb9537343c..e228893591a9 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 572189e37133..5d2a5e9544d9 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 68649336c4ad..6ebb81030d2d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -364,6 +364,13 @@
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
+ } \
+ \
+ /* Built-in module versions. */ \
+ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___modver) = .; \
+ *(__modver) \
+ VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 2296d8b1931f..b0ada6f37dd6 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -1,5 +1,6 @@
header-y += byteorder/
header-y += can/
+header-y += caif/
header-y += dvb/
header-y += hdlc/
header-y += isdn/
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index eb176bb1b15b..a2e910e01293 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -306,9 +306,6 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
extern void acpi_early_init(void);
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
-
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
new file mode 100644
index 000000000000..7180013a4a3a
--- /dev/null
+++ b/include/linux/acpi_io.h
@@ -0,0 +1,16 @@
+#ifndef _ACPI_IO_H_
+#define _ACPI_IO_H_
+
+#include <linux/io.h>
+#include <acpi/acpi.h>
+
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ return ioremap_cache(phys, size);
+}
+
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
+#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 359df0487690..9d339eb27881 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -103,6 +103,8 @@
#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/linux/caif/Kbuild b/include/linux/caif/Kbuild
new file mode 100644
index 000000000000..a9cf250689dc
--- /dev/null
+++ b/include/linux/caif/Kbuild
@@ -0,0 +1,2 @@
+header-y += caif_socket.h
+header-y += if_caif.h
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644
index 000000000000..473771a528c0
--- /dev/null
+++ b/include/linux/cpu_rmap.h
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ * based on affinity masks
+ */
+struct cpu_rmap {
+ u16 size, used;
+ void **obj;
+ struct {
+ u16 index;
+ u16 dist;
+ } near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+ kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+ return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
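A rough usage sketch for this new header, using only the declarations above: a driver with one IRQ per RX queue builds the reverse map at setup time and later asks which object is nearest to the current CPU. nr_rx_queues and rx_irq[] are placeholders for driver-specific data.

	struct cpu_rmap *rmap;
	unsigned int i, rxq;
	int rc;

	rmap = alloc_irq_cpu_rmap(nr_rx_queues);	/* GFP_KERNEL allocation */
	if (!rmap)
		return -ENOMEM;

	for (i = 0; i < nr_rx_queues; i++) {
		rc = irq_cpu_rmap_add(rmap, rx_irq[i]);	/* follow IRQ affinity */
		if (rc) {
			free_irq_cpu_rmap(rmap);
			return rc;
		}
	}

	/* Later, e.g. when steering a flow: nearest RX queue to this CPU. */
	rxq = cpu_rmap_lookup_index(rmap, raw_smp_processor_id());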
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 010e2d87ed75..d638e85dc501 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -279,8 +279,6 @@ enum dccp_state {
DCCP_MAX_STATES
};
-#define DCCP_STATE_MASK 0x1f
-
enum {
DCCPF_OPEN = TCPF_ESTABLISHED,
DCCPF_REQUESTING = TCPF_SYN_SENT,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a3b148a91874..0b84c61607e8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -249,7 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
((1 << ZONES_SHIFT) - 1);
if (__builtin_constant_p(bit))
- MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
else {
#ifdef CONFIG_DEBUG_VM
BUG_ON((GFP_ZONE_BAD >> bit) & 1);
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 6485d2a89bec..f4a2e6b1b864 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -135,6 +135,7 @@ enum {
IFLA_VF_PORTS,
IFLA_PORT_SELF,
IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
__IFLA_MAX
};
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..63c5ad78e37c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
@@ -240,6 +242,35 @@ extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+ * @release: Function to be called on release. This will be
+ * called in process context. Once registered, the
+ * structure must only be freed when this function is
+ * called or later.
+ */
+struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+ flush_scheduled_work();
+}
+
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +286,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
static inline int irq_set_affinity_hint(unsigned int irq,
- const struct cpumask *m)
+ const struct cpumask *m)
{
return -EINVAL;
}
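A sketch of how a driver might hook the affinity-notification interface declared above; struct my_queue and the two callbacks are placeholders:

struct my_queue {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* Process context: re-steer per-queue state to follow 'mask'. */
}

static void my_affinity_release(struct kref *ref)
{
	/* From here on it is safe to free the embedding structure. */
}

static int my_queue_register_notifier(struct my_queue *q)
{
	q->affinity_notify.notify = my_affinity_notify;
	q->affinity_notify.release = my_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}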
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 5f43a3b2e3ad..4deb3834d62c 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -89,6 +89,14 @@
#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */
#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+ IP_VS_CONN_F_NOOUTPUT | \
+ IP_VS_CONN_F_INACTIVE | \
+ IP_VS_CONN_F_SEQ_MASK | \
+ IP_VS_CONN_F_NO_CPORT | \
+ IP_VS_CONN_F_TEMPLATE \
+ )
+
/* Flags that are not sent to backup server start from bit 16 */
#define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 6a64c6fa81af..bfef56dadddb 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
* For now it's included from <linux/irq.h>
*/
+struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
/**
@@ -24,6 +25,7 @@ struct timer_rand_state;
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
+ * @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
@@ -70,6 +72,7 @@ struct irq_desc {
raw_spinlock_t lock;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
+ struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
@@ -101,13 +104,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
@@ -115,14 +111,7 @@ extern unsigned int __do_IRQ(unsigned int irq);
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
-#else
- if (likely(desc->handle_irq))
- desc->handle_irq(irq, desc);
- else
- __do_IRQ(irq);
-#endif
}
static inline void generic_handle_irq(unsigned int irq)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5a9d9059520b..e2f4d6af2125 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -243,6 +243,8 @@ extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;
+extern bool early_boot_irqs_disabled;
+
/* Values used for system_state */
extern enum system_states {
SYSTEM_BOOTING,
@@ -573,12 +575,6 @@ struct sysinfo {
char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
};
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -590,6 +586,32 @@ struct sysinfo {
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @cond: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but
+ * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+ * gcc (as of 4.4) only emits that error for obvious cases (e.g. not arguments
+ * to inline functions). So as a fallback we use the optimizer; if it can't
+ * prove the condition is false, it will cause a link error on the undefined
+ * "__build_bug_on_failed". This error message can be harder to track down
+ * though, hence the two different methods.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition) \
+ do { \
+ ((void)sizeof(char[1 - 2*!!(condition)])); \
+ if (condition) __build_bug_on_failed = 1; \
+ } while(0)
+#endif
+
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)
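Call sites are unaffected by the BUILD_BUG_ON() rework above; the condition only needs to be something the compiler (or, on optimized builds, the optimizer) can prove false, for example:

	/* Break the build (or the link, on optimized builds) if either
	 * assumption is ever violated. */
	BUILD_BUG_ON(sizeof(long) < sizeof(int));
	BUILD_BUG_ON(MAX_PARAM_PREFIX_LEN <= 0);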
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 08d7dc4ddf40..39f8453239f7 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -76,7 +76,7 @@ bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
\
_n = (long) &((ptr)->name##_end) \
- (long) &((ptr)->name##_begin); \
- MAYBE_BUILD_BUG_ON(_n < 0); \
+ BUILD_BUG_ON(_n < 0); \
\
kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
} while (0)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 71c09b26c759..4aef1dda6406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -436,16 +436,8 @@ do { \
#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_TRACE_IRQFLAGS
-extern void early_boot_irqs_off(void);
-extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
-static inline void early_boot_irqs_off(void)
-{
-}
-static inline void early_boot_irqs_on(void)
-{
-}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
@@ -522,12 +514,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l) do { } while (0)
+# define lock_map_acquire_read(l) do { } while (0)
# define lock_map_release(l) do { } while (0)
#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a576f989437..f512e189be5a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -146,6 +146,10 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+#endif
+
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;
@@ -335,6 +339,11 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
return 0;
}
+static inline void mem_cgroup_split_huge_fixup(struct page *head,
+ struct page *tail)
+{
+}
+
#endif /* CONFIG_CGROUP_MEM_CONT */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 956a35532f47..f6385fc17ad4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -470,6 +470,7 @@ static inline void set_compound_order(struct page *page, unsigned long order)
page[1].lru.prev = (void *)order;
}
+#ifdef CONFIG_MMU
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
* servicing faults for write access. In the normal case, do always want
@@ -482,6 +483,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
pte = pte_mkwrite(pte);
return pte;
}
+#endif
/*
* Multiple processes may "see" the same page. E.g. for untouched
diff --git a/include/linux/module.h b/include/linux/module.h
index 8b17fd8c790d..e7c6385c6683 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -58,6 +58,12 @@ struct module_attribute {
void (*free)(struct module *);
};
+struct module_version_attribute {
+ struct module_attribute mattr;
+ const char *module_name;
+ const char *version;
+};
+
struct module_kobject
{
struct kobject kobj;
@@ -161,7 +167,28 @@ extern struct module __this_module;
Using this automatically adds a checksum of the .c files and the
local headers in "srcversion".
*/
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version) \
+ extern ssize_t __modver_version_show(struct module_attribute *, \
+ struct module *, char *); \
+ static struct module_version_attribute __modver_version_attr \
+ __used \
+ __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
+ = { \
+ .mattr = { \
+ .attr = { \
+ .name = "version", \
+ .mode = S_IRUGO, \
+ }, \
+ .show = __modver_version_show, \
+ }, \
+ .module_name = KBUILD_MODNAME, \
+ .version = _version, \
+ }
+#endif
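Users of the macro are unchanged; whether it expands to MODULE_INFO() or to an entry in the new __modver section (collected by the vmlinux.lds.h hunk earlier in this patch), a driver still just declares, for example:

MODULE_DESCRIPTION("Example consumer of MODULE_VERSION");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.2");	/* intended to appear as /sys/module/<name>/version */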
/* Optional firmware file (or files) needed by the module
* format is simply firmware file name. Multiple firmware
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 112adf8bd47d..07b41951e3fa 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,15 +16,17 @@
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-#ifdef MODULE
#define ___module_cat(a,b) __mod_ ## a ## b
#define __module_cat(a,b) ___module_cat(a,b)
+#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
static const char __module_cat(name,__LINE__)[] \
__used __attribute__((section(".modinfo"), unused, aligned(1))) \
= __stringify(tag) "=" info
#else /* !MODULE */
-#define __MODULE_INFO(tag, name, info)
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO(tag, name, info) \
+ struct __module_cat(name,__LINE__) {}
#endif
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 0fa7a3a874c8..b21d567692b2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -150,6 +150,7 @@ static inline int ip_mroute_opt(int opt)
extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip_mr_init(void);
#else
static inline
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346b0340..c7d707452228 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -75,6 +75,9 @@ struct wireless_dev;
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP 0
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -551,14 +554,16 @@ struct rps_map {
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
*/
struct rps_dev_flow {
u16 cpu;
- u16 fill;
+ u16 filter;
unsigned int last_qtail;
};
+#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id);
+#endif
+
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
struct rps_map __rcu *rps_map;
@@ -643,6 +653,14 @@ struct xps_dev_maps {
(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
+#define TC_MAX_QUEUE 16
+#define TC_BITMASK 15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+ u16 count;
+ u16 offset;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,18 @@ struct xps_dev_maps {
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ * Called to setup 'tc' number of traffic classes in the net device. This
+ * is always called from the stack with the rtnl lock held and netif tx
+ * queues stopped. This allows the netdevice to perform queue management
+ * safely.
+ *
+ * RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ * u16 rxq_index, u32 flow_id);
+ * Set hardware filter for RFS. rxq_index is the target queue index;
+ * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ * Return the filter ID on success, or a negative error code.
*/
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
@@ -811,6 +841,7 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -825,6 +856,12 @@ struct net_device_ops {
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
+#ifdef CONFIG_RFS_ACCEL
+ int (*ndo_rx_flow_steer)(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 rxq_index,
+ u32 flow_id);
+#endif
};
/*
@@ -877,7 +914,11 @@ struct net_device {
struct list_head unreg_list;
/* Net device features */
- unsigned long features;
+ u32 features;
+
+ /* VLAN feature mask */
+ u32 vlan_features;
+
#define NETIF_F_SG 1 /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
@@ -1039,6 +1080,13 @@ struct net_device {
/* Number of RX queues currently active in device */
unsigned int real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+ /* CPU reverse-mapping for RX completion interrupts, indexed
+ * by RX queue number. Assigned by driver. This must only be
+ * set if the ndo_rx_flow_steer operation is defined. */
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
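A driver that implements ndo_rx_flow_steer is expected to fill in rx_cpu_rmap, typically when its RX queues and MSI-X vectors are set up. A minimal sketch using the cpu_rmap helpers from <linux/cpu_rmap.h>, assuming one IRQ per RX queue (foo_priv and its fields are placeholders):

#include <linux/cpu_rmap.h>

static int foo_alloc_rx_cpu_rmap(struct net_device *dev, struct foo_priv *priv)
{
	int i, rc;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->num_rx_queues);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_queues; i++) {
		/* one completion IRQ per RX queue is assumed here */
		rc = irq_cpu_rmap_add(dev->rx_cpu_rmap, priv->rx_irq[i]);
		if (rc) {
			free_irq_cpu_rmap(dev->rx_cpu_rmap);
			dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
	return 0;
}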
#endif
rx_handler_func_t __rcu *rx_handler;
@@ -1132,9 +1180,6 @@ struct net_device {
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
- /* VLAN feature mask */
- unsigned long vlan_features;
-
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
@@ -1143,6 +1188,9 @@ struct net_device {
/* Data Center Bridging netlink ops */
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/* max exchange id for FCoE LRO by ddp */
@@ -1153,12 +1201,66 @@ struct net_device {
/* phy device may attach itself for hardware timestamping */
struct phy_device *phydev;
+
+ /* group the device belongs to */
+ int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define NETDEV_ALIGN 32
static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+ return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+ return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+ dev->num_tc = 0;
+ memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+ memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->tc_to_txq[tc].count = count;
+ dev->tc_to_txq[tc].offset = offset;
+ return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+ if (num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ dev->num_tc = num_tc;
+ return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+ return dev->num_tc;
+}
+
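Taken together, these helpers are what a driver's ndo_setup_tc implementation is expected to call: declare the number of traffic classes, describe which block of TX queues backs each class, and optionally seed the priority map. A sketch for a device that splits its queues evenly (the even split and the foo_ name are assumptions, not a requirement):

static int foo_setup_tc(struct net_device *dev, u8 tc)
{
	unsigned int queues_per_tc;
	u8 i;

	if (!tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	if (netdev_set_num_tc(dev, tc))
		return -EINVAL;

	queues_per_tc = dev->real_num_tx_queues / tc;
	for (i = 0; i < tc; i++)
		netdev_set_tc_queue(dev, i, queues_per_tc, i * queues_per_tc);

	/* Map the first 'tc' priorities 1:1, everything else to class 0 */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, i < tc ? i : 0);

	return 0;
}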
+static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
@@ -1300,7 +1402,7 @@ struct packet_type {
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
@@ -1345,7 +1447,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
struct net *net;
net = dev_net(dev);
- lh = rcu_dereference(dev->dev_list.next);
+ lh = rcu_dereference(list_next_rcu(&dev->dev_list));
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
@@ -1355,6 +1457,13 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+ struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
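first_net_device_rcu() allows a lockless peek at the per-namespace device list; callers only need the RCU read lock, e.g.:

	struct net_device *dev;

	rcu_read_lock();
	dev = first_net_device_rcu(&init_net);
	if (dev)
		pr_info("first device: %s\n", dev->name);
	rcu_read_unlock();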
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1844,6 +1953,7 @@ extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
+extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
@@ -2268,7 +2378,7 @@ extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
@@ -2295,22 +2405,21 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
extern void linkwatch_run_queue(void);
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
{
int feature = gso_type << NETIF_F_GSO_SHIFT;
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 1893837b3966..eeec00abb664 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -24,16 +24,20 @@
#define NF_MAX_VERDICT NF_STOP
/* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
#define NF_VERDICT_QMASK 0xffff0000
#define NF_VERDICT_QBITS 16
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
/* only for userspace compatibility */
#ifndef __KERNEL__
@@ -41,6 +45,9 @@
<= 0x2000 is used for protocol-flags. */
#define NFC_UNKNOWN 0x4000
#define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
#endif
enum nf_inet_hooks {
@@ -72,6 +79,10 @@ union nf_inet_addr {
#ifdef __KERNEL__
#ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+ return -(verdict >> NF_VERDICT_QBITS);
+}
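With the verdict mask narrowed to 8 bits, an errno can ride in the upper 16 bits of an NF_DROP verdict and be recovered later. A worked round trip:

	int verdict = NF_DROP_ERR(-ECONNREFUSED);  /* (ECONNREFUSED << 16) | NF_DROP */
	int err = NF_DROP_GETERR(verdict);         /* -(verdict >> 16) == -ECONNREFUSED */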
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@ struct nf_afinfo {
int route_key_size;
};
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 9d40effe7ca7..15e83bf3dd58 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -1,3 +1,5 @@
+header-y += ipset/
+
header-y += nf_conntrack_common.h
header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@ header-y += nfnetlink_conntrack.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
header-y += x_tables.h
+header-y += xt_AUDIT.h
header-y += xt_CHECKSUM.h
header-y += xt_CLASSIFY.h
header-y += xt_CONNMARK.h
@@ -34,6 +37,7 @@ header-y += xt_connmark.h
header-y += xt_conntrack.h
header-y += xt_cpu.h
header-y += xt_dccp.h
+header-y += xt_devgroup.h
header-y += xt_dscp.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
@@ -54,7 +58,9 @@ header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_set.h
header-y += xt_sctp.h
+header-y += xt_socket.h
header-y += xt_state.h
header-y += xt_statistic.h
header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644
index 000000000000..601fe71d34d5
--- /dev/null
+++ b/include/linux/netfilter/ipset/Kbuild
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 000000000000..ec333d83f3b4
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL 6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN 32
+
+/* Message types and commands */
+enum ipset_cmd {
+ IPSET_CMD_NONE,
+ IPSET_CMD_PROTOCOL, /* 1: Return protocol version */
+ IPSET_CMD_CREATE, /* 2: Create a new (empty) set */
+ IPSET_CMD_DESTROY, /* 3: Destroy an (empty) set */
+ IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */
+ IPSET_CMD_RENAME, /* 5: Rename a set */
+ IPSET_CMD_SWAP, /* 6: Swap two sets */
+ IPSET_CMD_LIST, /* 7: List sets */
+ IPSET_CMD_SAVE, /* 8: Save sets */
+ IPSET_CMD_ADD, /* 9: Add an element to a set */
+ IPSET_CMD_DEL, /* 10: Delete an element from a set */
+ IPSET_CMD_TEST, /* 11: Test an element in a set */
+ IPSET_CMD_HEADER, /* 12: Get set header data only */
+ IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_MSG_MAX, /* Netlink message commands */
+
+ /* Commands in userspace: */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+ IPSET_CMD_HELP, /* 15: Get help */
+ IPSET_CMD_VERSION, /* 16: Get program version */
+ IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+
+ IPSET_CMD_MAX,
+
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+ IPSET_ATTR_UNSPEC,
+ IPSET_ATTR_PROTOCOL, /* 1: Protocol version */
+ IPSET_ATTR_SETNAME, /* 2: Name of the set */
+ IPSET_ATTR_TYPENAME, /* 3: Typename */
+ IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+ IPSET_ATTR_REVISION, /* 4: Settype revision */
+ IPSET_ATTR_FAMILY, /* 5: Settype family */
+ IPSET_ATTR_FLAGS, /* 6: Flags at command level */
+ IPSET_ATTR_DATA, /* 7: Nested attributes */
+ IPSET_ATTR_ADT, /* 8: Multiple data containers */
+ IPSET_ATTR_LINENO, /* 9: Restore lineno */
+ IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+ IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ __IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+ IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+ IPSET_ATTR_IP_TO, /* 2 */
+ IPSET_ATTR_CIDR, /* 3 */
+ IPSET_ATTR_PORT, /* 4 */
+ IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+ IPSET_ATTR_PORT_TO, /* 5 */
+ IPSET_ATTR_TIMEOUT, /* 6 */
+ IPSET_ATTR_PROTO, /* 7 */
+ IPSET_ATTR_CADT_FLAGS, /* 8 */
+ IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */
+ /* Reserve empty slots */
+ IPSET_ATTR_CADT_MAX = 16,
+ /* Create-only specific attributes */
+ IPSET_ATTR_GC,
+ IPSET_ATTR_HASHSIZE,
+ IPSET_ATTR_MAXELEM,
+ IPSET_ATTR_NETMASK,
+ IPSET_ATTR_PROBES,
+ IPSET_ATTR_RESIZE,
+ IPSET_ATTR_SIZE,
+ /* Kernel-only */
+ IPSET_ATTR_ELEMENTS,
+ IPSET_ATTR_REFERENCES,
+ IPSET_ATTR_MEMSIZE,
+
+ __IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+ IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+ IPSET_ATTR_NAME,
+ IPSET_ATTR_NAMEREF,
+ IPSET_ATTR_IP2,
+ IPSET_ATTR_CIDR2,
+ __IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+ IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IPADDR_IPV6,
+ __IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+ IPSET_ERR_PRIVATE = 4096,
+ IPSET_ERR_PROTOCOL,
+ IPSET_ERR_FIND_TYPE,
+ IPSET_ERR_MAX_SETS,
+ IPSET_ERR_BUSY,
+ IPSET_ERR_EXIST_SETNAME2,
+ IPSET_ERR_TYPE_MISMATCH,
+ IPSET_ERR_EXIST,
+ IPSET_ERR_INVALID_CIDR,
+ IPSET_ERR_INVALID_NETMASK,
+ IPSET_ERR_INVALID_FAMILY,
+ IPSET_ERR_TIMEOUT,
+ IPSET_ERR_REFERENCED,
+ IPSET_ERR_IPADDR_IPV4,
+ IPSET_ERR_IPADDR_IPV6,
+
+ /* Type specific error codes */
+ IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+ IPSET_FLAG_BIT_EXIST = 0,
+ IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+ IPSET_FLAG_BIT_BEFORE = 0,
+ IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+ IPSET_ADD,
+ IPSET_DEL,
+ IPSET_TEST,
+ IPSET_ADT_MAX,
+ IPSET_CREATE = IPSET_ADT_MAX,
+ IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID 65535
+
+enum ip_set_dim {
+ IPSET_DIM_ZERO = 0,
+ IPSET_DIM_ONE,
+ IPSET_DIM_TWO,
+ IPSET_DIM_THREE,
+ /* Max dimension in elements.
+ * If changed, new revision of iptables match/target is required.
+ */
+ IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+ IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+ IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+ IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+ IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+ IPSET_TYPE_IP_FLAG = 0,
+ IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+ IPSET_TYPE_PORT_FLAG = 1,
+ IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+ IPSET_TYPE_MAC_FLAG = 2,
+ IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+ IPSET_TYPE_IP2_FLAG = 3,
+ IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+ IPSET_TYPE_NAME_FLAG = 4,
+ IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+ /* Strictly speaking not a feature, but a flag for dumping:
+ * this settype must be dumped last */
+ IPSET_DUMP_LAST_FLAG = 7,
+ IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+ /* Kernelspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+ /* Userspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+ /* Low level add/del/test functions */
+ ipset_adtfn adt[IPSET_ADT_MAX];
+
+ /* When adding entries and set is full, try to resize the set */
+ int (*resize)(struct ip_set *set, bool retried);
+ /* Destroy the set */
+ void (*destroy)(struct ip_set *set);
+ /* Flush the elements */
+ void (*flush)(struct ip_set *set);
+ /* Expire entries before listing */
+ void (*expire)(struct ip_set *set);
+ /* List set header data */
+ int (*head)(struct ip_set *set, struct sk_buff *skb);
+ /* List elements */
+ int (*list)(const struct ip_set *set, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+ struct list_head list;
+
+ /* Typename */
+ char name[IPSET_MAXNAMELEN];
+ /* Protocol version */
+ u8 protocol;
+ /* Set features to control swapping */
+ u8 features;
+ /* Set type dimension */
+ u8 dimension;
+ /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+ u8 family;
+ /* Type revision */
+ u8 revision;
+
+ /* Create set */
+ int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+ /* Attribute policies */
+ const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+ const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
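A set type module fills in a struct ip_set_type and registers it from its init path. A bare-bones sketch — the "foo:ip" type, its create() stub and the policies shown are placeholders, not a real set type:

static int foo_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	/* allocate set->data, choose set->variant, parse create attributes */
	return 0;
}

static struct ip_set_type foo_type __read_mostly = {
	.name		= "foo:ip",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP,
	.dimension	= IPSET_DIM_ONE,
	.family		= AF_INET,
	.revision	= 0,
	.create		= foo_create,
	.create_policy	= {
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
	},
	.me		= THIS_MODULE,
};

static int __init foo_init(void)
{
	return ip_set_type_register(&foo_type);
}

static void __exit foo_fini(void)
{
	ip_set_type_unregister(&foo_type);
}

module_init(foo_init);
module_exit(foo_fini);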
+/* A generic IP set */
+struct ip_set {
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+ rwlock_t lock;
+ /* References to the set */
+ atomic_t ref;
+ /* The core set type */
+ struct ip_set_type *type;
+ /* The type variant doing the real job */
+ const struct ip_set_type_variant *variant;
+ /* The actual INET family of the set */
+ u8 family;
+ /* The type specific data */
+ void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char * ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+
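These are the entry points the iptables set match and SET target go through: the caller passes the kernel-side set index plus the dimension and direction flags that select which parts of the packet to extract. A hedged sketch of adding the source address of an IPv4 packet to a set whose index was resolved earlier with ip_set_nfnl_get() (info->index is a placeholder for wherever the target stores it):

	ip_set_add(info->index, skb, NFPROTO_IPV4,
		   IPSET_DIM_ONE, IPSET_DIM_ONE_SRC);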
+/* Utility functions */
+extern void * ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+ __be32 ip;
+ int ret = ip_set_get_ipaddr4(nla, &ip);
+
+ if (ret)
+ return ret;
+ *ipaddr = ntohl(ip);
+ return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+ return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+ return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+ return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+ return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+ return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \
+ sizeof(struct in6_addr), ipaddrptr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+ return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+ *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+ memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+ sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
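For example, bitmap_bytes(0, 31) is 4 (32 bits fit in one 32-bit word), bitmap_bytes(0, 0) is still 4 because the result is rounded up to a multiple of four bytes, and bitmap_bytes(0, 65535) is 8192.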
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET 83
+
+union ip_set_name_index {
+ char name[IPSET_MAXNAMELEN];
+ ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
+struct ip_set_req_get_set {
+ unsigned op;
+ unsigned version;
+ union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
+struct ip_set_req_version {
+ unsigned op;
+ unsigned version;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644
index 000000000000..ec9d9bea1e37
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve clashes. The hash table is resized
+ * (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace command only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel side readers. Those must be protected by proper RCU locking.
+ */
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE 4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE (3*4)
+
+/* A hash bucket */
+struct hbucket {
+ void *value; /* the array of the values */
+ u8 size; /* size of the array */
+ u8 pos; /* position of the first free entry */
+};
+
+/* The hash table: the table size stored here in order to make resizing easy */
+struct htable {
+ u8 htable_bits; /* size of hash table == 2^htable_bits */
+ struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i) &((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+ u8 cidr; /* the different cidr values in the set */
+ u32 nets; /* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+ struct htable *table; /* the hash table */
+ u32 maxelem; /* max elements in the hash */
+ u32 elements; /* current number of elements (vs timeout) */
+ u32 initval; /* random jhash init value */
+ u32 timeout; /* timeout value, if enabled */
+ struct timer_list gc; /* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+ u8 netmask; /* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+ struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+ /* Assume that hashsize == 2^htable_bits */
+ u8 bits = fls(hashsize - 1);
+ if (jhash_size(bits) != hashsize)
+ /* Round up to the first 2^n value */
+ bits = fls(hashsize);
+
+ return bits;
+}
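For instance, htable_bits(1024) returns 10, since jhash_size(10) == 1024 exactly, while htable_bits(1025) fails that check and returns fls(1025) = 11, i.e. the table is rounded up to 2048 buckets.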
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
+
+/* Book-keeping of the network cidr sizes when the hash stores
+ * different-sized networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ ++h->nets[cidr-1].nets;
+
+ pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets > 1)
+ return;
+
+ /* New cidr size */
+ for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+ /* Add in increasing prefix order, so larger cidr first */
+ if (h->nets[i].cidr < cidr)
+ swap(h->nets[i].cidr, cidr);
+ }
+ if (i < host_mask)
+ h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ --h->nets[cidr-1].nets;
+
+ pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets != 0)
+ return;
+
+ /* All entries with this cidr size deleted, so cleanup h->cidr[] */
+ for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+ if (h->nets[i].cidr == cidr)
+ h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+ }
+ h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size)
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+
+ ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+ u32 i;
+ struct htable *t = h->table;
+ size_t memsize = sizeof(*h)
+ + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+ + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+ + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++)
+ memsize += t->bucket[i].size * dsize;
+
+ return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size) {
+ n->size = n->pos = 0;
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+ }
+#ifdef IP_SET_HASH_WITH_NETS
+ memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+ * SET_HOST_MASK(set->family));
+#endif
+ h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ if (with_timeout(h->timeout))
+ del_timer_sync(&h->gc);
+
+ ahash_destroy(h->table);
+ kfree(h);
+
+ set->data = NULL;
+}
+
+#define HKEY(data, initval, htable_bits) \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+ & jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
+#define CONCAT(a, b, c) a##b##c
+#define TOKEN(a, b, c) CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem TOKEN(TYPE, PF, _elem)
+#define type_pf_telem TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add TOKEN(TYPE, PF, _add)
+#define type_pf_del TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush ip_set_hash_flush
+#define type_pf_destroy ip_set_hash_destroy
+#define type_pf_head TOKEN(TYPE, PF, _head)
+#define type_pf_list TOKEN(TYPE, PF, _list)
+#define type_pf_tlist TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant)
+
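A set-type implementation is expected to define TYPE, PF (and usually HOST_MASK) before including this header, so every type_pf_* name below is token-pasted into a per-type, per-family symbol. A sketch of the intended usage (hash_ip is used purely as an illustrative name here):

#define TYPE		hash_ip
#define PF		4
#define HOST_MASK	32
#include <linux/netfilter/ipset/ip_set_ahash.h>
/* type_pf_add now expands to hash_ip4_add, type_pf_variant to
 * hash_ip4_variant, and so on; redefining PF to 6 and including the
 * header again generates the IPv6 flavour. */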
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i) \
+ ((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * we skip maintaining the internal counters. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_elem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ type_pf_data_copy(ahash_data(n, n->pos++), value);
+ return 0;
+}
+
+/* Resize a hash: create a new hash table with double the hashsize
+ * and insert the elements into it. Repeat until we succeed or
+ * fail due to memory pressure. */
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+retry:
+ ret = 0;
+ htable_bits++;
+ pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+ set->name, orig->htable_bits, htable_bits, orig);
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_data(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_add(m, data);
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+ orig->htable_bits, orig, t->htable_bits, t);
+ ahash_destroy(orig);
+
+ return 0;
+}
+
+/* Add an element to a hash and update the internal counters on success,
+ * otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++)
+ if (type_pf_data_equal(ahash_data(n, i), d)) {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+
+ ret = type_pf_elem_add(n, value);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_elem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ pr_debug("test by nets\n");
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/* Test whether the element is in the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *d = value;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ /* If we test an IP address and not a network address,
+ * try all possible network sizes */
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ return 0;
+}
+
+/* Reply to a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct ip_set_hash *h = set->data;
+ struct nlattr *nested;
+ size_t memsize;
+
+ read_lock_bh(&set->lock);
+ memsize = ahash_memsize(h, with_timeout(h->timeout)
+ ? sizeof(struct type_pf_telem)
+ : sizeof(struct type_pf_elem),
+ set->family == AF_INET ? 32 : 128);
+ read_unlock_bh(&set->lock);
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+ htonl(jhash_size(h->table->htable_bits)));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+ if (h->netmask != HOST_MASK)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+ if (with_timeout(h->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+/* Reply to a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */
+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ pr_debug("list hash set %s\n", set->name);
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+ cb->args[2], n, i, data);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_list(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_add,
+ [IPSET_DEL] = type_pf_del,
+ [IPSET_TEST] = type_pf_test,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_list,
+ .resize = type_pf_resize,
+ .same_set = type_pf_same_set,
+};
+
+/* Flavour with timeout support */
+
+#define ahash_tdata(n, i) \
+ (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+ struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+ tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+ u32 timeout)
+{
+ struct type_pf_elem *data;
+
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_telem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ data = ahash_tdata(n, n->pos++);
+ type_pf_data_copy(data, value);
+ type_pf_data_timeout_set(data, timeout);
+ return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+ struct htable *t = h->table;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ u32 i;
+ int j;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ if (type_pf_data_expired(data)) {
+ pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+#endif
+ if (j != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data,
+ ahash_tdata(n, n->pos - 1));
+ n->pos--;
+ h->elements--;
+ }
+ }
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ /* Still try to delete expired elements */
+ continue;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ }
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+ /* Try to cleanup once */
+ if (!retried) {
+ i = h->elements;
+ write_lock_bh(&set->lock);
+ type_pf_expire(set->data);
+ write_unlock_bh(&set->lock);
+ if (h->elements < i)
+ return 0;
+ }
+
+retry:
+ ret = 0;
+ htable_bits++;
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_tadd(m, data,
+ type_pf_data_timeout(data));
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ ahash_destroy(orig);
+
+ return 0;
+}
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ /* FIXME: when set is full, we slow down here */
+ type_pf_expire(h);
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d)) {
+ if (type_pf_data_expired(data))
+ j = i;
+ else {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+ } else if (j == AHASH_MAX_SIZE + 1 &&
+ type_pf_data_expired(data))
+ j = i;
+ }
+ if (j != AHASH_MAX_SIZE + 1) {
+ data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ type_pf_data_copy(data, d);
+ type_pf_data_timeout_set(data, timeout);
+ goto out;
+ }
+ ret = type_pf_elem_tadd(n, d, timeout);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (type_pf_data_expired(data))
+ ret = -IPSET_ERR_EXIST;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data;
+ struct hbucket *n;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ }
+ return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data, *d = value;
+ struct hbucket *n;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */
+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ pr_debug("list %p %u\n", n, i);
+ if (type_pf_data_expired(data))
+ continue;
+ pr_debug("do list %p %u\n", n, i);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_tlist(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_tadd,
+ [IPSET_DEL] = type_pf_tdel,
+ [IPSET_TEST] = type_pf_ttest,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_tlist,
+ .resize = type_pf_tresize,
+ .same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct ip_set_hash *h = set->data;
+
+ pr_debug("called\n");
+ write_lock_bh(&set->lock);
+ type_pf_expire(h);
+ write_unlock_bh(&set->lock);
+
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ init_timer(&h->gc);
+ h->gc.data = (unsigned long) set;
+ h->gc.function = type_pf_gc;
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+ pr_debug("gc initialized, run in every %u\n",
+ IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644
index 000000000000..61a9e8746c83
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+ /* The element is out of the range of the set */
+ IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+ /* The range exceeds the size limit of the set type */
+ IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+ u32 mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644
index 000000000000..3882a81a3b3c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+ __be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644
index 000000000000..b86f15c04524
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+ /* Hash is full */
+ IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+ /* Null-valued element */
+ IPSET_ERR_HASH_ELEM,
+ /* Invalid protocol */
+ IPSET_ERR_INVALID_PROTO,
+ /* Protocol missing but must be specified */
+ IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE 1024
+#define IPSET_MIMINAL_HASHSIZE 64
+#define IPSET_DEFAULT_MAXELEM 65536
+#define IPSET_DEFAULT_PROBES 4
+#define IPSET_DEFAULT_RESIZE 100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644
index 000000000000..40a63f302613
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+ /* Set name to be added/deleted/tested does not exist. */
+ IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+ /* list:set type is not permitted to add */
+ IPSET_ERR_LOOP,
+ /* Missing reference set */
+ IPSET_ERR_BEFORE,
+ /* Reference set does not exist */
+ IPSET_ERR_NAMEREF,
+ /* Set is full */
+ IPSET_ERR_LIST_FULL,
+ /* Reference set is not added to the set */
+ IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE 8
+#define IP_SET_LIST_MIN_SIZE 4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644
index 000000000000..9f30c5f2ec1c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+ ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
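So a set created with a 600-second timeout is garbage-collected every 180 seconds (capped by IPSET_GC_TIME), one with a 30-second timeout every 10 seconds, and very short timeouts fall back to the one-second floor.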
+/* Set is defined without timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT UINT_MAX
+
+#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+ unsigned int timeout = ip_set_get_h32(tb);
+
+ /* Userspace supplied TIMEOUT parameter: adjust crazy size */
+ return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET 0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT (UINT_MAX/2)
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ (timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+ /* Bingo! */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT 0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
+ /* Bingo! :-) */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
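
The hash-flavoured helpers added above encode "no timeout" as 0 in jiffies and bump any value that collides with that constant. The following standalone sketch (plain C; jiffies, HZ and time_before() are mocked locally, so no kernel definitions are needed) walks through a set/expire round trip of the same logic:

    /* Mocked userspace sketch of the hash-variant timeout encoding above. */
    #include <stdio.h>
    #include <stdbool.h>

    #define HZ 100
    static unsigned long jiffies = 1000;      /* pretend current tick */
    #define ELEM_PERMANENT 0                  /* mirrors IPSET_ELEM_PERMANENT */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    static unsigned long timeout_set(unsigned int secs)
    {
        unsigned long t;

        if (!secs)
            return ELEM_PERMANENT;
        t = secs * HZ + jiffies;
        if (t == ELEM_PERMANENT)              /* collision with the magic value */
            t++;
        return t;
    }

    static bool timeout_expired(unsigned long t)
    {
        return t != ELEM_PERMANENT && time_before(t, jiffies);
    }

    int main(void)
    {
        unsigned long t = timeout_set(30);    /* entry valid for 30 s */

        jiffies += 31 * HZ;                   /* advance the clock past it */
        printf("expired: %d\n", timeout_expired(t));              /* 1 */
        printf("permanent expired: %d\n", timeout_expired(timeout_set(0)));  /* 0 */
        return 0;
    }
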
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644
index 000000000000..0e1fb50da562
--- /dev/null
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h>
+
+/* Prefixlen maps, by Jan Engelhardt */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+ return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+ return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+ return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+ return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
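
The pfxlen helpers above are plain table lookups indexed by prefix length. A minimal standalone sketch, assuming (as the names suggest) that the tables hold the CIDR mask in network byte order (netmask) and host byte order (hostmask), derives the same values arithmetically:

    /* Derive the masks the lookups above are assumed to return. */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    static uint32_t hostmask(uint8_t pfxlen)          /* host byte order */
    {
        return pfxlen ? ~0u << (32 - pfxlen) : 0;
    }

    static uint32_t netmask_be(uint8_t pfxlen)        /* network byte order */
    {
        return htonl(hostmask(pfxlen));
    }

    int main(void)
    {
        struct in_addr a = { .s_addr = netmask_be(24) };

        printf("/24 netmask: %s\n", inet_ntoa(a));    /* 255.255.255.0 */
        printf("/24 hostmask: 0x%08x\n", hostmask(24));
        return 0;
    }
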
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644
index 000000000000..064bc63a5346
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 361d6b5630ee..2b11fc1a86be 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -47,7 +47,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_QUEUE 3
#define NFNL_SUBSYS_ULOG 4
#define NFNL_SUBSYS_OSF 5
-#define NFNL_SUBSYS_COUNT 6
+#define NFNL_SUBSYS_IPSET 6
+#define NFNL_SUBSYS_COUNT 7
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 19711e3ffd42..debf1aefd753 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -42,6 +42,7 @@ enum ctattr_type {
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
+ CTA_TIMESTAMP,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@ enum ctattr_counters {
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
+enum ctattr_tstamp {
+ CTA_TIMESTAMP_UNSPEC,
+ CTA_TIMESTAMP_START,
+ CTA_TIMESTAMP_STOP,
+ __CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
enum ctattr_nat {
CTA_NAT_UNSPEC,
CTA_NAT_MINIP,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6712e713b299..37219525ff6f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -611,8 +611,9 @@ struct _compat_xt_align {
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644
index 000000000000..38751d2ea52b
--- /dev/null
+++ b/include/linux/netfilter/xt_AUDIT.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+ XT_AUDIT_TYPE_ACCEPT = 0,
+ XT_AUDIT_TYPE_DROP,
+ XT_AUDIT_TYPE_REJECT,
+ __XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+ __u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index 1b564106891d..b56e76811c04 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -1,14 +1,16 @@
#ifndef _XT_CT_H
#define _XT_CT_H
+#include <linux/types.h>
+
#define XT_CT_NOTRACK 0x1
struct xt_ct_target_info {
- u_int16_t flags;
- u_int16_t zone;
- u_int32_t ct_events;
- u_int32_t exp_events;
- char helper[16];
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
/* Used internally by the kernel */
struct nf_conn *ct __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
index 2584f4a777de..9eafdbbb401c 100644
--- a/include/linux/netfilter/xt_NFQUEUE.h
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -20,4 +20,10 @@ struct xt_NFQ_info_v1 {
__u16 queues_total;
};
+struct xt_NFQ_info_v2 {
+ __u16 queuenum;
+ __u16 queues_total;
+ __u16 bypass;
+};
+
#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h
index 2db543214ff5..7157318499c2 100644
--- a/include/linux/netfilter/xt_TCPOPTSTRIP.h
+++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h
@@ -1,13 +1,15 @@
#ifndef _XT_TCPOPTSTRIP_H
#define _XT_TCPOPTSTRIP_H
+#include <linux/types.h>
+
#define tcpoptstrip_set_bit(bmap, idx) \
(bmap[(idx) >> 5] |= 1U << (idx & 31))
#define tcpoptstrip_test_bit(bmap, idx) \
(((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
struct xt_tcpoptstrip_target_info {
- u_int32_t strip_bmap[8];
+ __u32 strip_bmap[8];
};
#endif /* _XT_TCPOPTSTRIP_H */
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h
index 3f3d69361289..902043c2073f 100644
--- a/include/linux/netfilter/xt_TPROXY.h
+++ b/include/linux/netfilter/xt_TPROXY.h
@@ -1,19 +1,21 @@
#ifndef _XT_TPROXY_H
#define _XT_TPROXY_H
+#include <linux/types.h>
+
/* TPROXY target is capable of marking the packet to perform
* redirection. We can get rid of that whenever we get support for
* multiple targets in the same rule. */
struct xt_tproxy_target_info {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
__be32 laddr;
__be16 lport;
};
struct xt_tproxy_target_info_v1 {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
union nf_inet_addr laddr;
__be16 lport;
};
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
index 886682656f09..9b883c8fbf54 100644
--- a/include/linux/netfilter/xt_cluster.h
+++ b/include/linux/netfilter/xt_cluster.h
@@ -1,15 +1,17 @@
#ifndef _XT_CLUSTER_MATCH_H
#define _XT_CLUSTER_MATCH_H
+#include <linux/types.h>
+
enum xt_cluster_flags {
XT_CLUSTER_F_INV = (1 << 0)
};
struct xt_cluster_match_info {
- u_int32_t total_nodes;
- u_int32_t node_mask;
- u_int32_t hash_seed;
- u_int32_t flags;
+ __u32 total_nodes;
+ __u32 node_mask;
+ __u32 hash_seed;
+ __u32 flags;
};
#define XT_CLUSTER_NODES_MAX 32
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
index eacfedc6b5d0..0ea5e79f5bd7 100644
--- a/include/linux/netfilter/xt_comment.h
+++ b/include/linux/netfilter/xt_comment.h
@@ -4,7 +4,7 @@
#define XT_MAX_COMMENT_LEN 256
struct xt_comment_info {
- unsigned char comment[XT_MAX_COMMENT_LEN];
+ char comment[XT_MAX_COMMENT_LEN];
};
#endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 7e3284bcbd2b..0ca66e97acbc 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -1,8 +1,15 @@
#ifndef _XT_CONNLIMIT_H
#define _XT_CONNLIMIT_H
+#include <linux/types.h>
+
struct xt_connlimit_data;
+enum {
+ XT_CONNLIMIT_INVERT = 1 << 0,
+ XT_CONNLIMIT_DADDR = 1 << 1,
+};
+
struct xt_connlimit_info {
union {
union nf_inet_addr mask;
@@ -13,7 +20,14 @@ struct xt_connlimit_info {
};
#endif
};
- unsigned int limit, inverse;
+ unsigned int limit;
+ union {
+ /* revision 0 */
+ unsigned int inverse;
+
+ /* revision 1 */
+ __u32 flags;
+ };
/* Used internally by the kernel */
struct xt_connlimit_data *data __attribute__((aligned(8)));
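
The anonymous union added to xt_connlimit_info above lets the old revision-0 "inverse" word and the new revision-1 "flags" word share the same storage, so existing userspace keeps working. A self-contained sketch (struct re-declared locally with plain types so it builds without kernel headers) shows the overlay:

    /* Revision-0/revision-1 overlay: "inverse" and "flags" alias each other. */
    #include <stdio.h>

    #define XT_CONNLIMIT_INVERT (1 << 0)
    #define XT_CONNLIMIT_DADDR  (1 << 1)

    struct connlimit_compat {
        unsigned int limit;
        union {
            unsigned int inverse;   /* revision 0: boolean */
            unsigned int flags;     /* revision 1: bit field */
        };
    };

    int main(void)
    {
        struct connlimit_compat c = { .limit = 16, .flags = XT_CONNLIMIT_INVERT };

        /* a revision-0 reader still sees a non-zero "inverse" */
        printf("limit=%u inverse=%u daddr=%d\n", c.limit, c.inverse,
               !!(c.flags & XT_CONNLIMIT_DADDR));
        return 0;
    }
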
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 54f47a2f6152..74b904d8f99c 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -58,4 +58,19 @@ struct xt_conntrack_mtinfo2 {
__u16 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo3 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __u16 origsrc_port, origdst_port;
+ __u16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+ __u16 origsrc_port_high, origdst_port_high;
+ __u16 replsrc_port_high, repldst_port_high;
+};
+
#endif /*_XT_CONNTRACK_H*/
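
Revision 3 of the conntrack match info above pairs each *_port field with a *_port_high field, so a single-port match becomes a range with low == high. A hypothetical standalone sketch of the range test (names are local stand-ins, not the kernel implementation):

    /* Inclusive port-range test of the kind the *_port/_port_high pairs imply. */
    #include <stdio.h>
    #include <stdbool.h>

    struct port_range {
        unsigned short lo, hi;    /* e.g. origsrc_port, origsrc_port_high */
    };

    static bool port_in_range(const struct port_range *r, unsigned short port)
    {
        return port >= r->lo && port <= r->hi;
    }

    int main(void)
    {
        struct port_range ephemeral = { 32768, 60999 };
        struct port_range ssh_only  = { 22, 22 };     /* single port */

        printf("%d %d\n", port_in_range(&ephemeral, 40000),
               port_in_range(&ssh_only, 80));         /* prints 1 0 */
        return 0;
    }
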
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644
index 000000000000..1babde0ec900
--- /dev/null
+++ b/include/linux/netfilter/xt_devgroup.h
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+ XT_DEVGROUP_MATCH_SRC = 0x1,
+ XT_DEVGROUP_INVERT_SRC = 0x2,
+ XT_DEVGROUP_MATCH_DST = 0x4,
+ XT_DEVGROUP_INVERT_DST = 0x8,
+};
+
+struct xt_devgroup_info {
+ __u32 flags;
+ __u32 src_group;
+ __u32 src_mask;
+ __u32 dst_group;
+ __u32 dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
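
A sketch of the device-group test the new xt_devgroup match presumably performs: a device matches when its masked group equals the configured group, optionally inverted. The struct and helper below are local stand-ins under that assumption, not the kernel implementation:

    /* Assumed masked-compare semantics of the device-group match. */
    #include <stdio.h>
    #include <stdbool.h>

    #define XT_DEVGROUP_MATCH_SRC  0x1
    #define XT_DEVGROUP_INVERT_SRC 0x2

    struct devgroup_cfg {
        unsigned int flags, src_group, src_mask;
    };

    static bool devgroup_match(const struct devgroup_cfg *c, unsigned int dev_group)
    {
        bool hit = true;

        if (c->flags & XT_DEVGROUP_MATCH_SRC)
            hit = ((dev_group & c->src_mask) == c->src_group) ^
                  !!(c->flags & XT_DEVGROUP_INVERT_SRC);
        return hit;
    }

    int main(void)
    {
        struct devgroup_cfg c = {
            .flags = XT_DEVGROUP_MATCH_SRC,
            .src_group = 0x10,
            .src_mask = 0xff,
        };

        printf("%d %d\n", devgroup_match(&c, 0x10), devgroup_match(&c, 0x20));
        return 0;
    }
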
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index b0d28c659ab7..ca6e03e47a17 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -1,6 +1,8 @@
#ifndef _XT_QUOTA_H
#define _XT_QUOTA_H
+#include <linux/types.h>
+
enum xt_quota_flags {
XT_QUOTA_INVERT = 0x1,
};
@@ -9,9 +11,9 @@ enum xt_quota_flags {
struct xt_quota_priv;
struct xt_quota_info {
- u_int32_t flags;
- u_int32_t pad;
- aligned_u64 quota;
+ __u32 flags;
+ __u32 pad;
+ aligned_u64 quota;
/* Used internally by the kernel */
struct xt_quota_priv *master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644
index 000000000000..081f1ded2842
--- /dev/null
+++ b/include/linux/netfilter/xt_set.h
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC 0x01 /* Source match/add */
+#define IPSET_DST 0x02 /* Destination match/add */
+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
+
+struct xt_set_info_v0 {
+ ip_set_id_t index;
+ union {
+ __u32 flags[IPSET_DIM_MAX + 1];
+ struct {
+ __u32 __flags[IPSET_DIM_MAX];
+ __u8 dim;
+ __u8 flags;
+ } compat;
+ } u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+ struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+ struct xt_set_info_v0 add_set;
+ struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+ ip_set_id_t index;
+ __u8 dim;
+ __u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+ struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 6f475b8ff34b..26d7217bd4f1 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -1,6 +1,8 @@
#ifndef _XT_SOCKET_H
#define _XT_SOCKET_H
+#include <linux/types.h>
+
enum {
XT_SOCKET_TRANSPARENT = 1 << 0,
};
diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h
index 14b6df412c9f..7c37fac576c4 100644
--- a/include/linux/netfilter/xt_time.h
+++ b/include/linux/netfilter/xt_time.h
@@ -1,14 +1,16 @@
#ifndef _XT_TIME_H
#define _XT_TIME_H 1
+#include <linux/types.h>
+
struct xt_time_info {
- u_int32_t date_start;
- u_int32_t date_stop;
- u_int32_t daytime_start;
- u_int32_t daytime_stop;
- u_int32_t monthdays_match;
- u_int8_t weekdays_match;
- u_int8_t flags;
+ __u32 date_start;
+ __u32 date_stop;
+ __u32 daytime_start;
+ __u32 daytime_stop;
+ __u32 monthdays_match;
+ __u8 weekdays_match;
+ __u8 flags;
};
enum {
diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h
index 9947f56cdbdd..04d1bfea03c2 100644
--- a/include/linux/netfilter/xt_u32.h
+++ b/include/linux/netfilter/xt_u32.h
@@ -1,6 +1,8 @@
#ifndef _XT_U32_H
#define _XT_U32_H 1
+#include <linux/types.h>
+
enum xt_u32_ops {
XT_U32_AND,
XT_U32_LEFTSH,
@@ -9,13 +11,13 @@ enum xt_u32_ops {
};
struct xt_u32_location_element {
- u_int32_t number;
- u_int8_t nextop;
+ __u32 number;
+ __u8 nextop;
};
struct xt_u32_value_element {
- u_int32_t min;
- u_int32_t max;
+ __u32 min;
+ __u32 max;
};
/*
@@ -27,14 +29,14 @@ struct xt_u32_value_element {
struct xt_u32_test {
struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
- u_int8_t nnums;
- u_int8_t nvalues;
+ __u8 nnums;
+ __u8 nvalues;
};
struct xt_u32 {
struct xt_u32_test tests[XT_U32_MAXSIZE+1];
- u_int8_t ntests;
- u_int8_t invert;
+ __u8 ntests;
+ __u8 invert;
};
#endif /* _XT_U32_H */
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index c73ef0b18bdc..be5be1577a56 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_802_3_H
#define __LINUX_BRIDGE_EBT_802_3_H
+#include <linux/types.h>
+
#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02
@@ -24,24 +26,24 @@
/* ui has one byte ctrl, ni has two */
struct hdr_ui {
- uint8_t dsap;
- uint8_t ssap;
- uint8_t ctrl;
- uint8_t orig[3];
+ __u8 dsap;
+ __u8 ssap;
+ __u8 ctrl;
+ __u8 orig[3];
__be16 type;
};
struct hdr_ni {
- uint8_t dsap;
- uint8_t ssap;
+ __u8 dsap;
+ __u8 ssap;
__be16 ctrl;
- uint8_t orig[3];
+ __u8 orig[3];
__be16 type;
};
struct ebt_802_3_hdr {
- uint8_t daddr[6];
- uint8_t saddr[6];
+ __u8 daddr[6];
+ __u8 saddr[6];
__be16 len;
union {
struct hdr_ui ui;
@@ -59,10 +61,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
#endif
struct ebt_802_3_info {
- uint8_t sap;
+ __u8 sap;
__be16 type;
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 0009558609a7..bd4e3ad0b706 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_AMONG_H
#define __LINUX_BRIDGE_EBT_AMONG_H
+#include <linux/types.h>
+
#define EBT_AMONG_DST 0x01
#define EBT_AMONG_SRC 0x02
@@ -30,7 +32,7 @@
*/
struct ebt_mac_wormhash_tuple {
- uint32_t cmp[2];
+ __u32 cmp[2];
__be32 ip;
};
diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h
index cbf4843b6b0f..522f3e427f49 100644
--- a/include/linux/netfilter_bridge/ebt_arp.h
+++ b/include/linux/netfilter_bridge/ebt_arp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_ARP_H
#define __LINUX_BRIDGE_EBT_ARP_H
+#include <linux/types.h>
+
#define EBT_ARP_OPCODE 0x01
#define EBT_ARP_HTYPE 0x02
#define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@ struct ebt_arp_info
unsigned char smmsk[ETH_ALEN];
unsigned char dmaddr[ETH_ALEN];
unsigned char dmmsk[ETH_ALEN];
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index 6a708fb92241..c4bbc41b0ea4 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -15,6 +15,8 @@
#ifndef __LINUX_BRIDGE_EBT_IP_H
#define __LINUX_BRIDGE_EBT_IP_H
+#include <linux/types.h>
+
#define EBT_IP_SOURCE 0x01
#define EBT_IP_DEST 0x02
#define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@ struct ebt_ip_info {
__be32 daddr;
__be32 smsk;
__be32 dmsk;
- uint8_t tos;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tos;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ __u16 sport[2];
+ __u16 dport[2];
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
index e5de98701519..42b889682721 100644
--- a/include/linux/netfilter_bridge/ebt_ip6.h
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -12,14 +12,19 @@
#ifndef __LINUX_BRIDGE_EBT_IP6_H
#define __LINUX_BRIDGE_EBT_IP6_H
+#include <linux/types.h>
+
#define EBT_IP6_SOURCE 0x01
#define EBT_IP6_DEST 0x02
#define EBT_IP6_TCLASS 0x04
#define EBT_IP6_PROTO 0x08
#define EBT_IP6_SPORT 0x10
#define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
- EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+ EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+ EBT_IP6_ICMP6)
#define EBT_IP6_MATCH "ip6"
/* the same values are used for the invflags */
@@ -28,12 +33,18 @@ struct ebt_ip6_info {
struct in6_addr daddr;
struct in6_addr smsk;
struct in6_addr dmsk;
- uint8_t tclass;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tclass;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ union {
+ __u16 sport[2];
+ __u8 icmpv6_type[2];
+ };
+ union {
+ __u16 dport[2];
+ __u8 icmpv6_code[2];
+ };
};
#endif
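
The anonymous unions added to ebt_ip6_info above reuse the port-range storage for ICMPv6 type/code ranges, selected by the new EBT_IP6_ICMP6 bit in bitmask. A self-contained illustration with a locally declared struct:

    /* Either a port range or an ICMPv6 type/code range, same storage. */
    #include <stdio.h>

    #define EBT_IP6_ICMP6 0x40

    struct ip6_ports_or_icmp {
        unsigned char bitmask;
        union {
            unsigned short sport[2];
            unsigned char  icmpv6_type[2];
        };
        union {
            unsigned short dport[2];
            unsigned char  icmpv6_code[2];
        };
    };

    int main(void)
    {
        struct ip6_ports_or_icmp m = {
            .bitmask     = EBT_IP6_ICMP6,
            .icmpv6_type = { 128, 129 },    /* echo request..echo reply */
            .icmpv6_code = { 0, 255 },      /* any code */
        };

        if (m.bitmask & EBT_IP6_ICMP6)
            printf("ICMPv6 types %d-%d\n",
                   m.icmpv6_type[0], m.icmpv6_type[1]);
        else
            printf("ports %d-%d\n", m.sport[0], m.sport[1]);
        return 0;
    }
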
diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h
index 4bf76b751676..66d80b30ba0e 100644
--- a/include/linux/netfilter_bridge/ebt_limit.h
+++ b/include/linux/netfilter_bridge/ebt_limit.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LIMIT_H
#define __LINUX_BRIDGE_EBT_LIMIT_H
+#include <linux/types.h>
+
#define EBT_LIMIT_MATCH "limit"
/* timings are in milliseconds. */
@@ -10,13 +12,13 @@
seconds, or one every 59 hours. */
struct ebt_limit_info {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* Used internally by the kernel */
unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
+ __u32 credit;
+ __u32 credit_cap, cost;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index cc2cdfb764bc..7e7f1d1fe494 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LOG_H
#define __LINUX_BRIDGE_EBT_LOG_H
+#include <linux/types.h>
+
#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
#define EBT_LOG_ARP 0x02
#define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
#define EBT_LOG_WATCHER "log"
struct ebt_log_info {
- uint8_t loglevel;
- uint8_t prefix[EBT_LOG_PREFIX_SIZE];
- uint32_t bitmask;
+ __u8 loglevel;
+ __u8 prefix[EBT_LOG_PREFIX_SIZE];
+ __u32 bitmask;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h
index 9ceb10ec0ed6..410f9e5a71d4 100644
--- a/include/linux/netfilter_bridge/ebt_mark_m.h
+++ b/include/linux/netfilter_bridge/ebt_mark_m.h
@@ -1,13 +1,15 @@
#ifndef __LINUX_BRIDGE_EBT_MARK_M_H
#define __LINUX_BRIDGE_EBT_MARK_M_H
+#include <linux/types.h>
+
#define EBT_MARK_AND 0x01
#define EBT_MARK_OR 0x02
#define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
struct ebt_mark_m_info {
unsigned long mark, mask;
- uint8_t invert;
- uint8_t bitmask;
+ __u8 invert;
+ __u8 bitmask;
};
#define EBT_MARK_MATCH "mark_m"
diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h
index 052817849b83..df829fce9125 100644
--- a/include/linux/netfilter_bridge/ebt_nflog.h
+++ b/include/linux/netfilter_bridge/ebt_nflog.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H
+#include <linux/types.h>
+
#define EBT_NFLOG_MASK 0x0
#define EBT_NFLOG_PREFIX_SIZE 64
@@ -10,11 +12,11 @@
#define EBT_NFLOG_DEFAULT_THRESHOLD 1
struct ebt_nflog_info {
- u_int32_t len;
- u_int16_t group;
- u_int16_t threshold;
- u_int16_t flags;
- u_int16_t pad;
+ __u32 len;
+ __u16 group;
+ __u16 threshold;
+ __u16 flags;
+ __u16 pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};
diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h
index 51a799840931..c241badcd036 100644
--- a/include/linux/netfilter_bridge/ebt_pkttype.h
+++ b/include/linux/netfilter_bridge/ebt_pkttype.h
@@ -1,9 +1,11 @@
#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
#define __LINUX_BRIDGE_EBT_PKTTYPE_H
+#include <linux/types.h>
+
struct ebt_pkttype_info {
- uint8_t pkt_type;
- uint8_t invert;
+ __u8 pkt_type;
+ __u8 invert;
};
#define EBT_PKTTYPE_MATCH "pkttype"
diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h
index e503a0aa2728..1025b9f5fb7d 100644
--- a/include/linux/netfilter_bridge/ebt_stp.h
+++ b/include/linux/netfilter_bridge/ebt_stp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_STP_H
#define __LINUX_BRIDGE_EBT_STP_H
+#include <linux/types.h>
+
#define EBT_STP_TYPE 0x0001
#define EBT_STP_FLAGS 0x0002
@@ -21,24 +23,24 @@
#define EBT_STP_MATCH "stp"
struct ebt_stp_config_info {
- uint8_t flags;
- uint16_t root_priol, root_priou;
+ __u8 flags;
+ __u16 root_priol, root_priou;
char root_addr[6], root_addrmsk[6];
- uint32_t root_costl, root_costu;
- uint16_t sender_priol, sender_priou;
+ __u32 root_costl, root_costu;
+ __u16 sender_priol, sender_priou;
char sender_addr[6], sender_addrmsk[6];
- uint16_t portl, portu;
- uint16_t msg_agel, msg_ageu;
- uint16_t max_agel, max_ageu;
- uint16_t hello_timel, hello_timeu;
- uint16_t forward_delayl, forward_delayu;
+ __u16 portl, portu;
+ __u16 msg_agel, msg_ageu;
+ __u16 max_agel, max_ageu;
+ __u16 hello_timel, hello_timeu;
+ __u16 forward_delayl, forward_delayu;
};
struct ebt_stp_info {
- uint8_t type;
+ __u8 type;
struct ebt_stp_config_info config;
- uint16_t bitmask;
- uint16_t invflags;
+ __u16 bitmask;
+ __u16 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ulog.h b/include/linux/netfilter_bridge/ebt_ulog.h
index b677e2671541..89a6becb5269 100644
--- a/include/linux/netfilter_bridge/ebt_ulog.h
+++ b/include/linux/netfilter_bridge/ebt_ulog.h
@@ -1,6 +1,8 @@
#ifndef _EBT_ULOG_H
#define _EBT_ULOG_H
+#include <linux/types.h>
+
#define EBT_ULOG_DEFAULT_NLGROUP 0
#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
#define EBT_ULOG_VERSION 1
struct ebt_ulog_info {
- uint32_t nlgroup;
+ __u32 nlgroup;
unsigned int cprange;
unsigned int qthreshold;
char prefix[EBT_ULOG_PREFIX_LEN];
diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h
index 1d98be4031e7..967d1d5cf98d 100644
--- a/include/linux/netfilter_bridge/ebt_vlan.h
+++ b/include/linux/netfilter_bridge/ebt_vlan.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_VLAN_H
#define __LINUX_BRIDGE_EBT_VLAN_H
+#include <linux/types.h>
+
#define EBT_VLAN_ID 0x01
#define EBT_VLAN_PRIO 0x02
#define EBT_VLAN_ENCAP 0x04
@@ -8,12 +10,12 @@
#define EBT_VLAN_MATCH "vlan"
struct ebt_vlan_info {
- uint16_t id; /* VLAN ID {1-4095} */
- uint8_t prio; /* VLAN User Priority {0-7} */
+ __u16 id; /* VLAN ID {1-4095} */
+ __u8 prio; /* VLAN User Priority {0-7} */
__be16 encap; /* VLAN Encapsulated frame code {0-65535} */
- uint8_t bitmask; /* Args bitmask bit 1=1 - ID arg,
+ __u8 bitmask; /* Args bitmask bit 1=1 - ID arg,
bit 2=1 User-Priority arg, bit 3=1 encap*/
- uint8_t invflags; /* Inverse bitmask bit 1=1 - inversed ID arg,
+ __u8 invflags; /* Inverse bitmask bit 1=1 - inversed ID arg,
bit 2=1 - inversed Priority arg */
};
diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index e5a3687c8a72..c6a204c97047 100644
--- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -1,6 +1,8 @@
#ifndef _IPT_CLUSTERIP_H_target
#define _IPT_CLUSTERIP_H_target
+#include <linux/types.h>
+
enum clusterip_hashmode {
CLUSTERIP_HASHMODE_SIP = 0,
CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@ struct clusterip_config;
struct ipt_clusterip_tgt_info {
- u_int32_t flags;
+ __u32 flags;
/* only relevant for new ones */
- u_int8_t clustermac[6];
- u_int16_t num_total_nodes;
- u_int16_t num_local_nodes;
- u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
- u_int32_t hash_mode;
- u_int32_t hash_initval;
+ __u8 clustermac[6];
+ __u16 num_total_nodes;
+ __u16 num_local_nodes;
+ __u16 local_nodes[CLUSTERIP_MAX_NODES];
+ __u32 hash_mode;
+ __u32 hash_initval;
/* Used internally by the kernel */
struct clusterip_config *config;
diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h
index 7ca45918ab8e..bb88d5315a4d 100644
--- a/include/linux/netfilter_ipv4/ipt_ECN.h
+++ b/include/linux/netfilter_ipv4/ipt_ECN.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_TARGET_H
#define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_DSCP.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -19,11 +21,11 @@
#define IPT_ECN_OP_MASK 0xce
struct ipt_ECN_info {
- u_int8_t operation; /* bitset of operations */
- u_int8_t ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
+ __u8 operation; /* bitset of operations */
+ __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
union {
struct {
- u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+ __u8 ece:1, cwr:1; /* TCP ECT bits */
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
index 2529660c5b38..5bca78267afd 100644
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ b/include/linux/netfilter_ipv4/ipt_SAME.h
@@ -1,15 +1,17 @@
#ifndef _IPT_SAME_H
#define _IPT_SAME_H
+#include <linux/types.h>
+
#define IPT_SAME_MAX_RANGE 10
#define IPT_SAME_NODST 0x01
struct ipt_same_info {
unsigned char info;
- u_int32_t rangesize;
- u_int32_t ipnum;
- u_int32_t *iparray;
+ __u32 rangesize;
+ __u32 ipnum;
+ __u32 *iparray;
/* hangs off end. */
struct nf_nat_range range[IPT_SAME_MAX_RANGE];
diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h
index ee6611edc112..f6ac169d92f9 100644
--- a/include/linux/netfilter_ipv4/ipt_TTL.h
+++ b/include/linux/netfilter_ipv4/ipt_TTL.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_SET = 0,
IPT_TTL_INC,
@@ -13,8 +15,8 @@ enum {
#define IPT_TTL_MAXMODE IPT_TTL_DEC
struct ipt_TTL_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
index 446de6aef983..0da42237c8da 100644
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ b/include/linux/netfilter_ipv4/ipt_addrtype.h
@@ -1,6 +1,8 @@
#ifndef _IPT_ADDRTYPE_H
#define _IPT_ADDRTYPE_H
+#include <linux/types.h>
+
enum {
IPT_ADDRTYPE_INVERT_SOURCE = 0x0001,
IPT_ADDRTYPE_INVERT_DEST = 0x0002,
@@ -9,17 +11,17 @@ enum {
};
struct ipt_addrtype_info_v1 {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t flags;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 flags;
};
/* revision 0 */
struct ipt_addrtype_info {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t invert_source;
- u_int32_t invert_dest;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 invert_source;
+ __u32 invert_dest;
};
#endif
diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h
index 2e555b4d05e3..4e02bb0119e3 100644
--- a/include/linux/netfilter_ipv4/ipt_ah.h
+++ b/include/linux/netfilter_ipv4/ipt_ah.h
@@ -1,9 +1,11 @@
#ifndef _IPT_AH_H
#define _IPT_AH_H
+#include <linux/types.h>
+
struct ipt_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u8 invflags; /* Inverse flags */
};
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index 9945baa4ccd7..eabf95fb7d3e 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_H
#define _IPT_ECN_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_dscp.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -20,12 +22,12 @@
/* match info */
struct ipt_ecn_info {
- u_int8_t operation;
- u_int8_t invert;
- u_int8_t ip_ect;
+ __u8 operation;
+ __u8 invert;
+ __u8 ip_ect;
union {
struct {
- u_int8_t ect;
+ __u8 ect;
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_ttl.h b/include/linux/netfilter_ipv4/ipt_ttl.h
index ee24fd86a3aa..37bee4442486 100644
--- a/include/linux/netfilter_ipv4/ipt_ttl.h
+++ b/include/linux/netfilter_ipv4/ipt_ttl.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_EQ = 0, /* equals */
IPT_TTL_NE, /* not equals */
@@ -13,8 +15,8 @@ enum {
struct ipt_ttl_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h
index afb7813d45ab..ebd8ead1bb63 100644
--- a/include/linux/netfilter_ipv6/ip6t_HL.h
+++ b/include/linux/netfilter_ipv6/ip6t_HL.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_SET = 0,
IP6T_HL_INC,
@@ -14,8 +16,8 @@ enum {
#define IP6T_HL_MAXMODE IP6T_HL_DEC
struct ip6t_HL_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h
index 6be6504162bb..205ed62e4605 100644
--- a/include/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -1,6 +1,8 @@
#ifndef _IP6T_REJECT_H
#define _IP6T_REJECT_H
+#include <linux/types.h>
+
enum ip6t_reject_with {
IP6T_ICMP6_NO_ROUTE,
IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@ enum ip6t_reject_with {
};
struct ip6t_reject_info {
- u_int32_t with; /* reject type */
+ __u32 with; /* reject type */
};
#endif /*_IP6T_REJECT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index 17a745cfb2c7..5da2b65cb3ad 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_AH_H
#define _IP6T_AH_H
+#include <linux/types.h>
+
struct ip6t_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t hdrres; /* Test of the Reserved Filed */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u32 hdrlen; /* Header Length */
+ __u8 hdrres; /* Test of the Reserved Field */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_AH_SPI 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 3724d0850920..b47f61b9e082 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_FRAG_H
#define _IP6T_FRAG_H
+#include <linux/types.h>
+
struct ip6t_frag {
- u_int32_t ids[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 ids[2]; /* Security Parameter Index */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_FRAG_IDS 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_hl.h b/include/linux/netfilter_ipv6/ip6t_hl.h
index 5ef91b8319a8..6e76dbc6c19a 100644
--- a/include/linux/netfilter_ipv6/ip6t_hl.h
+++ b/include/linux/netfilter_ipv6/ip6t_hl.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_EQ = 0, /* equals */
IP6T_HL_NE, /* not equals */
@@ -14,8 +16,8 @@ enum {
struct ip6t_hl_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
index 01dfd445596a..efae3a20c214 100644
--- a/include/linux/netfilter_ipv6/ip6t_ipv6header.h
+++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
@@ -8,10 +8,12 @@ on whether they contain certain headers */
#ifndef __IPV6HEADER_H
#define __IPV6HEADER_H
+#include <linux/types.h>
+
struct ip6t_ipv6header_info {
- u_int8_t matchflags;
- u_int8_t invflags;
- u_int8_t modeflag;
+ __u8 matchflags;
+ __u8 invflags;
+ __u8 modeflag;
};
#define MASK_HOPOPTS 128
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
index 18549bca2d1f..a7729a5025cd 100644
--- a/include/linux/netfilter_ipv6/ip6t_mh.h
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -1,10 +1,12 @@
#ifndef _IP6T_MH_H
#define _IP6T_MH_H
+#include <linux/types.h>
+
/* MH matching stuff */
struct ip6t_mh {
- u_int8_t types[2]; /* MH type range */
- u_int8_t invflags; /* Inverse flags */
+ __u8 types[2]; /* MH type range */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "invflags" field in struct ip6t_mh. */
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index 62d89bcd9f9c..17d419a811fd 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -1,14 +1,16 @@
#ifndef _IP6T_OPTS_H
#define _IP6T_OPTS_H
+#include <linux/types.h>
+
#define IP6T_OPTS_OPTSNR 16
struct ip6t_opts {
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
- u_int16_t opts[IP6T_OPTS_OPTSNR]; /* opts */
- u_int8_t optsnr; /* Nr of OPts */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
+ __u16 opts[IP6T_OPTS_OPTSNR]; /* opts */
+ __u8 optsnr; /* Number of options */
};
#define IP6T_OPTS_LEN 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index ab91bfd2cd00..7605a5ff81cd 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -1,18 +1,19 @@
#ifndef _IP6T_RT_H
#define _IP6T_RT_H
+#include <linux/types.h>
/*#include <linux/in6.h>*/
#define IP6T_RT_HOPS 16
struct ip6t_rt {
- u_int32_t rt_type; /* Routing Type */
- u_int32_t segsleft[2]; /* Segments Left */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 rt_type; /* Routing Type */
+ __u32 segsleft[2]; /* Segments Left */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
struct in6_addr addrs[IP6T_RT_HOPS]; /* Hops */
- u_int8_t addrnr; /* Nr of Addresses */
+ __u8 addrnr; /* Nr of Addresses */
};
#define IP6T_RT_TYP 0x01
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 2cfa4bc8dea6..d4bb6f58c90c 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -247,6 +247,35 @@ struct tc_gred_sopt {
__u16 pad1;
};
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
/* HTB section */
#define TC_HTB_NUMPRIO 8
#define TC_HTB_MAXDEPTH 8
@@ -481,4 +510,16 @@ struct tc_drr_stats {
__u32 deficit;
};
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
#endif
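
A standalone sketch of how the mqprio option above could be consumed: a packet priority indexes prio_tc_map (masked with TC_QOPT_BITMASK) to pick a traffic class, and count/offset describe that class's queue range. The struct is re-declared with plain types and the values are made up for illustration:

    /* Priority -> traffic class -> queue range, using the mqprio layout. */
    #include <stdio.h>

    #define TC_QOPT_BITMASK   15
    #define TC_QOPT_MAX_QUEUE 16

    struct tc_mqprio_qopt {
        unsigned char  num_tc;
        unsigned char  prio_tc_map[TC_QOPT_BITMASK + 1];
        unsigned char  hw;
        unsigned short count[TC_QOPT_MAX_QUEUE];
        unsigned short offset[TC_QOPT_MAX_QUEUE];
    };

    int main(void)
    {
        struct tc_mqprio_qopt q = {
            .num_tc      = 2,
            /* priorities 0-7 -> TC 0, 8-15 -> TC 1 */
            .prio_tc_map = { 0, 0, 0, 0, 0, 0, 0, 0,
                             1, 1, 1, 1, 1, 1, 1, 1 },
            .count       = { 4, 4 },       /* queues per TC */
            .offset      = { 0, 4 },       /* first queue of each TC */
        };
        unsigned int prio = 9;
        unsigned int tc = q.prio_tc_map[prio & TC_QOPT_BITMASK];
        unsigned int first = q.offset[tc];
        unsigned int last = q.offset[tc] + q.count[tc] - 1;

        printf("prio %u -> tc %u, queues %u-%u\n", prio, tc, first, last);
        return 0;
    }
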
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 94c1f03b50eb..9a85412e0db6 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -322,9 +322,12 @@ struct dquot_operations {
qsize_t *(*get_reserved_space) (struct inode *);
};
+struct path;
+
/* Operations handling requests from userspace */
struct quotactl_ops {
- int (*quota_on)(struct super_block *, int, int, char *);
+ int (*quota_on)(struct super_block *, int, int, struct path *);
+ int (*quota_on_meta)(struct super_block *, int, int);
int (*quota_off)(struct super_block *, int);
int (*quota_sync)(struct super_block *, int, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 223b14cd129c..eb354f6f26b3 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -76,11 +76,9 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
-int dquot_quota_on(struct super_block *sb, int type, int format_id,
- char *path);
int dquot_enable(struct inode *inode, int type, int format_id,
unsigned int flags);
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
struct path *path);
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3c995b4d742c..a0b639f8e805 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -235,8 +235,6 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc,
struct rtc_task *task, int freq);
extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
-extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
- unsigned int enabled);
void rtc_aie_update_irq(void *private);
void rtc_uie_update_irq(void *private);
@@ -246,8 +244,6 @@ int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
ktime_t expires, ktime_t period);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf221d65d9ad..31f02d0b46a7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1801,6 +1801,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
skb = skb->prev)
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
@@ -1868,7 +1877,7 @@ extern void skb_split(struct sk_buff *skb,
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
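
The new skb_queue_reverse_walk_safe() macro caches the previous element before the loop body runs, so the body may unlink or free the current one. The generic standalone demo below mirrors the macro's shape, with a tiny circular list standing in for struct sk_buff_head:

    /* "Safe" reverse walk: tmp is fetched before the body can free pos. */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next, *prev;
        int val;
    };

    #define reverse_walk_safe(head, pos, tmp) \
        for (pos = (head)->prev, tmp = pos->prev; \
             pos != (head); \
             pos = tmp, tmp = pos->prev)

    int main(void)
    {
        struct node head = { &head, &head, 0 };
        struct node *pos, *tmp;
        int i;

        for (i = 1; i <= 3; i++) {            /* build 1, 2, 3 at the tail */
            struct node *n = malloc(sizeof(*n));
            n->val = i;
            n->next = &head;
            n->prev = head.prev;
            head.prev->next = n;
            head.prev = n;
        }

        reverse_walk_safe(&head, pos, tmp) {  /* prints 3 2 1 */
            printf("%d\n", pos->val);
            /* safe to unlink and free pos here, tmp is already cached */
            pos->prev->next = pos->next;
            pos->next->prev = pos->prev;
            free(pos);
        }
        return 0;
    }
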
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 0093dd7c1d6f..800617b4ddd5 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -109,7 +109,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
/* Did you forget to fix assumptions on max features? */
- MAYBE_BUILD_BUG_ON(fbit >= 32);
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 32);
+ else
+ BUG_ON(fbit >= 32);
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
diff --git a/include/media/mt9v011.h b/include/media/mt9v011.h
new file mode 100644
index 000000000000..ea29fc74cd06
--- /dev/null
+++ b/include/media/mt9v011.h
@@ -0,0 +1,17 @@
+/* mt9v011 sensor
+ *
+ * Copyright (C) 2011 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MT9V011_H__
+#define __MT9V011_H__
+
+struct mt9v011_platform_data {
+ unsigned xtal; /* Hz */
+};
+
+#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index a23c1fc685a1..2963263f31e2 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -183,6 +183,9 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
}
#define IR_MAX_DURATION 0xFFFFFFFF /* a bit more than 4 seconds */
+#define US_TO_NS(usec) ((usec) * 1000)
+#define MS_TO_US(msec) ((msec) * 1000)
+#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index ac7ce00f39cf..79827143d5ac 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -115,7 +115,7 @@ struct saa7146_dev
/* different device locks */
spinlock_t slock;
- struct mutex lock;
+ struct mutex v4l2_lock;
unsigned char __iomem *mem; /* pointer to mapped IO memory */
u32 revision; /* chip revision; needed for bug-workarounds*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 2d65b35cdab2..a659319e8582 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -138,21 +138,10 @@ struct v4l2_subdev_ops;
/* Load an i2c module and return an initialized v4l2_subdev struct.
The client_type argument is the name of the chip that's on the adapter. */
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter, const char *client_type,
- int irq, void *platform_data,
u8 addr, const unsigned short *probe_addrs);
-/* Load an i2c module and return an initialized v4l2_subdev struct.
- The client_type argument is the name of the chip that's on the adapter. */
-static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, const char *client_type,
- u8 addr, const unsigned short *probe_addrs)
-{
- return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, client_type, 0, NULL,
- addr, probe_addrs);
-}
-
struct i2c_board_info;
struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index d69ab4aae032..97d063837b61 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/device.h>
+#include <linux/videodev2.h>
/* forward references */
struct v4l2_ctrl_handler;
@@ -53,8 +54,10 @@ struct v4l2_ctrl_ops {
* @handler: The handler that owns the control.
* @cluster: Point to start of cluster array.
* @ncontrols: Number of controls in cluster array.
- * @has_new: Internal flag: set when there is a valid new value.
* @done: Internal flag: set for each processed control.
+ * @is_new: Set when the user specified a new value for this control. It
+ * is also set when called from v4l2_ctrl_handler_setup. Drivers
+ * should never set this flag.
* @is_private: If set, then this control is private to its handler and it
* will not be added to any other handlers. Drivers can set
* this flag.
@@ -97,9 +100,9 @@ struct v4l2_ctrl {
struct v4l2_ctrl_handler *handler;
struct v4l2_ctrl **cluster;
unsigned ncontrols;
- unsigned int has_new:1;
unsigned int done:1;
+ unsigned int is_new:1;
unsigned int is_private:1;
unsigned int is_volatile:1;
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b0316a7cf08d..daf1e57d9b26 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -106,10 +106,7 @@ struct v4l2_subdev_io_pin_config {
u8 strength; /* Pin drive strength */
};
-/* s_config: if set, then it is always called by the v4l2_i2c_new_subdev*
- functions after the v4l2_subdev was registered. It is used to pass
- platform data to the subdev which can be used during initialization.
-
+/*
s_io_pin_config: configure one or more chip I/O pins for chips that
multiplex different internal signal pads out to IO pins. This function
takes a pointer to an array of 'n' pin configuration entries, one for
@@ -141,7 +138,6 @@ struct v4l2_subdev_io_pin_config {
struct v4l2_subdev_core_ops {
int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
int (*log_status)(struct v4l2_subdev *sd);
- int (*s_config)(struct v4l2_subdev *sd, int irq, void *platform_data);
int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n,
struct v4l2_subdev_io_pin_config *pincfg);
int (*init)(struct v4l2_subdev *sd, u32 val);
@@ -415,6 +411,21 @@ struct v4l2_subdev_ops {
const struct v4l2_subdev_sensor_ops *sensor;
};
+/*
+ * Internal ops. Never call this from drivers, only the v4l2 framework can call
+ * these ops.
+ *
+ * registered: called when this subdev is registered. When called the v4l2_dev
+ * field is set to the correct v4l2_device.
+ *
+ * unregistered: called when this subdev is unregistered. When called the
+ * v4l2_dev field is still set to the correct v4l2_device.
+ */
+struct v4l2_subdev_internal_ops {
+ int (*registered)(struct v4l2_subdev *sd);
+ void (*unregistered)(struct v4l2_subdev *sd);
+};
+
#define V4L2_SUBDEV_NAME_SIZE 32
/* Set this flag if this subdev is a i2c device. */
@@ -431,6 +442,8 @@ struct v4l2_subdev {
u32 flags;
struct v4l2_device *v4l2_dev;
const struct v4l2_subdev_ops *ops;
+ /* Never call these internal ops from within a driver! */
+ const struct v4l2_subdev_internal_ops *internal_ops;
/* The control handler of this subdev. May be NULL. */
struct v4l2_ctrl_handler *ctrl_handler;
/* name must be unique */
diff --git a/include/net/dst.h b/include/net/dst.h
index 93b0310317be..484f80b69ada 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -40,24 +40,10 @@ struct dst_entry {
struct rcu_head rcu_head;
struct dst_entry *child;
struct net_device *dev;
- short error;
- short obsolete;
- int flags;
-#define DST_HOST 0x0001
-#define DST_NOXFRM 0x0002
-#define DST_NOPOLICY 0x0004
-#define DST_NOHASH 0x0008
-#define DST_NOCACHE 0x0010
+ struct dst_ops *ops;
+ unsigned long _metrics;
unsigned long expires;
-
- unsigned short header_len; /* more space at head required */
- unsigned short trailer_len; /* space to reserve at tail */
-
- unsigned int rate_tokens;
- unsigned long rate_last; /* rate limiting for ICMP */
-
struct dst_entry *path;
-
struct neighbour *neighbour;
struct hh_cache *hh;
#ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@ struct dst_entry {
int (*input)(struct sk_buff*);
int (*output)(struct sk_buff*);
- struct dst_ops *ops;
-
- u32 _metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+ short error;
+ short obsolete;
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 tclassid;
#else
__u32 __pad2;
#endif
-
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,14 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+ unsigned long rate_last; /* rate limiting for ICMP */
+ unsigned int rate_tokens;
+ int flags;
+#define DST_HOST 0x0001
+#define DST_NOXFRM 0x0002
+#define DST_NOPOLICY 0x0004
+#define DST_NOHASH 0x0008
+#define DST_NOCACHE 0x0010
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
@@ -103,10 +96,70 @@ struct dst_entry {
#ifdef __KERNEL__
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY 0x1UL
+#define __DST_METRICS_PTR(Y) \
+ ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+ return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+ unsigned long val = dst->_metrics;
+ if (!(val & DST_METRICS_READ_ONLY))
+ __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+ unsigned long p = dst->_metrics;
+
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+ const u32 *src_metrics,
+ bool read_only)
+{
+ dst->_metrics = ((unsigned long) src_metrics) |
+ (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+ u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+ if (dst_metrics) {
+ u32 *src_metrics = DST_METRICS_PTR(src);
+
+ memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+ }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+ return DST_METRICS_PTR(dst);
+}
+
static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
- return dst->_metrics[metric-1];
+ u32 *p = DST_METRICS_PTR(dst);
+
+ return p[metric-1];
}
static inline u32
@@ -131,22 +184,10 @@ dst_metric_advmss(const struct dst_entry *dst)
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
- dst->_metrics[metric-1] = val;
-}
-
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
- memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
+ u32 *p = dst_metrics_write_ptr(dst);
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
- dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
- return dst->_metrics;
+ if (p)
+ p[metric-1] = val;
}
static inline u32
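
The dst_entry changes above replace the embedded metrics array with a tagged pointer: the low bit of _metrics marks a shared read-only array, and the first write goes through cow_metrics() to obtain a private copy. A self-contained sketch of the same trick (malloc stands in for the kernel allocation path, and the RTAX_MAX value is a stand-in):

    /* Tagged-pointer metrics with copy-on-write, in miniature. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define RTAX_MAX   16                  /* stand-in for the kernel value */
    #define METRICS_RO 0x1UL

    static const unsigned int default_metrics[RTAX_MAX];  /* all zero, shared */

    struct fake_dst {
        unsigned long _metrics;            /* pointer | METRICS_RO */
    };

    static unsigned int *metrics_ptr(struct fake_dst *d)
    {
        return (unsigned int *)(d->_metrics & ~METRICS_RO);
    }

    static unsigned int *metrics_write_ptr(struct fake_dst *d)
    {
        if (d->_metrics & METRICS_RO) {    /* copy on first write */
            unsigned int *p = malloc(RTAX_MAX * sizeof(*p));

            memcpy(p, metrics_ptr(d), RTAX_MAX * sizeof(*p));
            d->_metrics = (unsigned long)p;
        }
        return metrics_ptr(d);
    }

    int main(void)
    {
        struct fake_dst d = {
            ._metrics = (unsigned long)default_metrics | METRICS_RO,
        };

        metrics_write_ptr(&d)[2] = 1500;   /* triggers the private copy */
        printf("metric[2] = %u, shared default untouched: %u\n",
               metrics_ptr(&d)[2], default_metrics[2]);
        free(metrics_ptr(&d));
        return 0;
    }
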
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 21a320b8708e..dc0746328947 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -18,6 +18,7 @@ struct dst_ops {
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*default_mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
diff --git a/include/net/flow.h b/include/net/flow.h
index 240b7f356c71..1ae901f24436 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -48,7 +48,8 @@ struct flowi {
__u8 proto;
__u8 flags;
-#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_PRECOW_METRICS 0x02
union {
struct {
__be16 sport;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8181498fa96c..6e6dfd757682 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -219,7 +219,13 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
- return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+ __u8 flags = 0;
+
+ if (inet_sk(sk)->transparent)
+ flags |= FLOWI_FLAG_ANYSRC;
+ if (sk->sk_protocol == IPPROTO_TCP)
+ flags |= FLOWI_FLAG_PRECOW_METRICS;
+ return flags;
}
#endif /* _INET_SOCK_H */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 599d96e74114..61f2c66edb2a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <asm/atomic.h>
@@ -33,8 +34,8 @@ struct inet_peer {
atomic_t refcnt;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
- * We can share memory with rcu_head to keep inet_peer small
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+ * We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
@@ -42,6 +43,7 @@ struct inet_peer {
atomic_t ip_id_count; /* IP ID for the next packet */
__u32 tcp_ts;
__u32 tcp_ts_stamp;
+ u32 metrics[RTAX_MAX];
};
struct rcu_head rcu;
};
@@ -49,6 +51,13 @@ struct inet_peer {
void inet_initpeers(void) __init;
+#define INETPEER_METRICS_NEW (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+ return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
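
inet_metrics_new() above relies on a sentinel: the RTAX_LOCK slot holds all-ones until real metrics have been cached in the peer. A trivial standalone sketch of that convention, with stand-in index and array-size values:

    /* All-ones in one slot means "metrics not initialised yet". */
    #include <stdio.h>
    #include <stdbool.h>

    #define RTAX_LOCK   1                  /* stand-in index */
    #define RTAX_MAX    16                 /* stand-in for the kernel value */
    #define METRICS_NEW (~(unsigned int)0)

    struct fake_peer {
        unsigned int metrics[RTAX_MAX];
    };

    static bool metrics_new(const struct fake_peer *p)
    {
        return p->metrics[RTAX_LOCK - 1] == METRICS_NEW;
    }

    int main(void)
    {
        struct fake_peer p;

        p.metrics[RTAX_LOCK - 1] = METRICS_NEW;   /* freshly created peer */
        printf("new: %d\n", metrics_new(&p));     /* 1 */
        p.metrics[RTAX_LOCK - 1] = 0;             /* metrics filled in */
        printf("new: %d\n", metrics_new(&p));     /* 0 */
        return 0;
    }
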
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 07bdb5e9e8ac..08b46b8c3031 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -55,7 +55,7 @@ struct fib_nh {
int nh_weight;
int nh_power;
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 nh_tclassid;
#endif
int nh_oif;
@@ -77,7 +77,7 @@ struct fib_info {
int fib_protocol;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 fib_metrics[RTAX_MAX];
+ u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
#define fib_window fib_metrics[RTAX_WINDOW-1]
#define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +96,15 @@ struct fib_info {
struct fib_rule;
#endif
+struct fib_table;
struct fib_result {
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
struct fib_info *fi;
+ struct fib_table *table;
+ struct list_head *fa_head;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule *r;
#endif
@@ -155,9 +158,6 @@ extern int fib_table_delete(struct fib_table *, struct fib_config *);
extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
- const struct flowi *flp,
- struct fib_result *res);
extern void fib_free_table(struct fib_table *tb);
@@ -201,7 +201,7 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
extern int __net_init fib4_rules_init(struct net *net);
extern void __net_exit fib4_rules_exit(struct net *net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
extern u32 fib_rules_tclass(struct fib_result *res);
#endif
@@ -218,8 +218,7 @@ extern void ip_fib_init(void);
extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
struct net_device *dev, __be32 *spec_dst,
u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
- struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -229,13 +228,13 @@ extern int fib_sync_up(struct net_device *dev);
extern __be32 __fib_res_prefsrc(struct fib_result *res);
extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
u32 rtag;
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index b7bbd6c28cfa..5d75feadf4f4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -28,6 +28,80 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
+#include <net/net_namespace.h> /* Net namespace */
+
+/*
+ * Generic access of ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+ return net->ipvs;
+}
+/*
+ * Get the net ptr from the skb in traffic cases;
+ * use skb_sknet when the call comes from userland (ioctl or netlink).
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /*
+ * This is used for debug only.
+ * Start with the most likely hit;
+ * end with BUG().
+ */
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ if (skb_dst(skb)->dev)
+ return dev_net(skb_dst(skb)->dev);
+ WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /* Start with the most likely hit */
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return sock_net(skb->sk);
+#endif
+#else
+ return &init_net;
+#endif
+}
+/*
+ * This one is needed for single_open_net() since net is stored directly
+ * in private, not as a struct, i.e. seq_file_net() can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@ struct ip_vs_seq {
before last resized pkt */
};
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters ustats;
+ struct u64_stats_sync syncp;
+};
/*
* IPVS statistics objects
@@ -279,17 +370,34 @@ struct ip_vs_estimator {
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_stats_user ustats; /* statistics */
struct ip_vs_estimator est; /* estimator */
-
- spinlock_t lock; /* spin lock */
+ struct ip_vs_cpu_stats *cpustats; /* per cpu counters */
+ spinlock_t lock; /* spin lock */
};
+/*
+ * Helper macros for the per-cpu counters
+ * (ipvs->tot_stats->ustats.count)
+ */
+#define IPVS_STAT_INC(ipvs, count) \
+ __this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+ do {\
+ write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ __this_cpu_add((ipvs)->ustats->count, value); \
+ write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ } while (0)
+
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
+struct ip_vs_proto_data;
struct ip_vs_protocol {
struct ip_vs_protocol *next;
@@ -297,21 +405,22 @@ struct ip_vs_protocol {
u16 protocol;
u16 num_states;
int dont_defrag;
- atomic_t appcnt; /* counter of proto app incs */
- int *timeout_table; /* protocol timeout table */
void (*init)(struct ip_vs_protocol *pp);
void (*exit)(struct ip_vs_protocol *pp);
+ void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+ void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
int (*conn_schedule)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp,
+ struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp);
struct ip_vs_conn *
(*conn_in_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -319,7 +428,6 @@ struct ip_vs_protocol {
struct ip_vs_conn *
(*conn_out_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -337,11 +445,11 @@ struct ip_vs_protocol {
int (*state_transition)(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
- int (*register_app)(struct ip_vs_app *inc);
+ int (*register_app)(struct net *net, struct ip_vs_app *inc);
- void (*unregister_app)(struct ip_vs_app *inc);
+ void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
int (*app_conn_bind)(struct ip_vs_conn *cp);
@@ -350,14 +458,26 @@ struct ip_vs_protocol {
int offset,
const char *msg);
- void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+ void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
- int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table; /* protocol timeout table */
+ atomic_t appcnt; /* counter of proto app incs. */
+ struct tcp_states_t *tcp_state_table;
};
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+ unsigned short proto);
struct ip_vs_conn_param {
+ struct net *net;
const union nf_inet_addr *caddr;
const union nf_inet_addr *vaddr;
__be16 cport;
@@ -375,16 +495,19 @@ struct ip_vs_conn_param {
*/
struct ip_vs_conn {
struct list_head c_list; /* hashed list heads */
-
+#ifdef CONFIG_NET_NS
+ struct net *net; /* Name space */
+#endif
/* Protocol, addresses and port numbers */
- u16 af; /* address family */
- union nf_inet_addr caddr; /* client address */
- union nf_inet_addr vaddr; /* virtual address */
- union nf_inet_addr daddr; /* destination address */
- volatile __u32 flags; /* status flags */
- __be16 cport;
- __be16 vport;
- __be16 dport;
+ u16 af; /* address family */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __u32 fwmark; /* Firewall mark from skb */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ union nf_inet_addr daddr; /* destination address */
+ volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
/* counter and timer */
@@ -422,10 +545,38 @@ struct ip_vs_conn {
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ const struct ip_vs_pe *pe;
char *pe_data;
__u8 pe_data_len;
};
+/*
+ * To save some memory in the conn table when network namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net;
+#else
+ return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+ struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net == net;
+#else
+ return 1;
+#endif
+}
/*
* Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +636,7 @@ struct ip_vs_service {
unsigned flags; /* service status flags */
unsigned timeout; /* persistent timeout in ticks */
__be32 netmask; /* grouping granularity */
+ struct net *net;
struct list_head destinations; /* real server d-linked list */
__u32 num_dests; /* number of servers */
@@ -510,8 +662,8 @@ struct ip_vs_dest {
struct list_head d_list; /* for table with all the dests */
u16 af; /* address family */
- union nf_inet_addr addr; /* IP address of the server */
__be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
volatile unsigned flags; /* dest status flags */
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
@@ -538,8 +690,8 @@ struct ip_vs_dest {
/* for virtual service */
struct ip_vs_service *svc; /* service it belongs to */
__u16 protocol; /* which protocol (TCP/UDP) */
- union nf_inet_addr vaddr; /* virtual IP address */
__be16 vport; /* virtual port number */
+ union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
};
@@ -674,13 +826,14 @@ enum {
IP_VS_DIR_LAST,
};
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
const union nf_inet_addr *caddr,
__be16 cport,
const union nf_inet_addr *vaddr,
__be16 vport,
struct ip_vs_conn_param *p)
{
+ p->net = net;
p->af = af;
p->protocol = protocol;
p->caddr = caddr;
@@ -695,7 +848,6 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -703,7 +855,6 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -719,14 +870,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr,
__be16 dport, unsigned flags,
- struct ip_vs_dest *dest);
+ struct ip_vs_dest *dest, __u32 fwmark);
extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
extern const char * ip_vs_state_name(__u16 proto, int state);
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
extern int ip_vs_conn_init(void);
extern void ip_vs_conn_cleanup(void);
@@ -796,12 +947,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
* (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+ __u16 proto, __u16 port);
extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -814,15 +965,27 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
void ip_vs_unbind_pe(struct ip_vs_service *svc);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ __module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ module_put(pe->module);
+}
/*
* IPVS protocol functions (from ip_vs_proto.c)
*/
extern int ip_vs_protocol_init(void);
extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
extern int *ip_vs_create_timeout_table(int *table, int size);
extern int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1015,21 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
extern struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored);
+ struct ip_vs_proto_data *pd, int *ignored);
extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
/*
* IPVS control data and functions (from ip_vs_ctl.c)
*/
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
extern struct ip_vs_stats ip_vs_stats;
extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport);
static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1038,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
}
extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
extern int ip_vs_use_count_inc(void);
@@ -888,8 +1046,9 @@ extern void ip_vs_use_count_dec(void);
extern int ip_vs_control_init(void);
extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
- const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+ __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ __u16 protocol, __u32 fwmark);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -897,14 +1056,12 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+ __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
/*
@@ -912,8 +1069,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
*/
extern int ip_vs_estimator_init(void);
extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
/*
@@ -952,14 +1109,14 @@ extern int ip_vs_icmp_xmit_v6
* we are loaded. Just set ip_vs_drop_rate to 'n' and
* we start to drop 1/rate of the packets
*/
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
- if (!ip_vs_drop_rate) return 0;
- if (--ip_vs_drop_counter > 0) return 0;
- ip_vs_drop_counter = ip_vs_drop_rate;
+ if (!ipvs->drop_rate)
+ return 0;
+ if (--ipvs->drop_counter > 0)
+ return 0;
+ ipvs->drop_counter = ipvs->drop_rate;
return 1;
}
@@ -1047,9 +1204,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
* Netfilter connection tracking
* (from ip_vs_nfct.c)
*/
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
- return sysctl_ip_vs_conntrack;
+ return ipvs->sysctl_conntrack;
}
extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1219,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
#else
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
return 0;
}
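/*
 * Editor's sketch (not part of the patch): how the per-netns accessors
 * introduced above fit together on the traffic path. The function name
 * is hypothetical.
 */
#include <net/ip_vs.h>

static int example_todrop(struct sk_buff *skb, struct ip_vs_conn *cp)
{
	struct net *net = skb_net(skb);		/* traffic path, not userland */
	struct netns_ipvs *ipvs = net_ipvs(net);

	/* Only act on connections that belong to this namespace. */
	if (!ip_vs_conn_net_eq(cp, net))
		return 0;

	return ip_vs_todrop(ipvs);
}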
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1bf812b21fb7..b3b4a34cb2cc 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -20,6 +20,7 @@
#include <net/netns/conntrack.h>
#endif
#include <net/netns/xfrm.h>
+#include <net/netns/ip_vs.h>
struct proc_dir_entry;
struct net_device;
@@ -94,6 +95,7 @@ struct net {
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+ struct netns_ipvs *ipvs;
};
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d85cff10e169..d0d13378991e 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -50,11 +50,24 @@ union nf_conntrack_expect_proto {
/* per conntrack: application helper private data */
union nf_conntrack_help {
/* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+ defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+ defined(CONFIG_NF_CONNTRACK_H323_MODULE)
struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+ defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
struct nf_ct_sip_master ct_sip_info;
+#endif
};
#include <linux/types.h>
@@ -116,14 +129,14 @@ struct nf_conn {
u_int32_t secmark;
#endif
- /* Storage reserved for other modules: */
- union nf_conntrack_proto proto;
-
/* Extensions */
struct nf_ct_ext *ext;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
};
static inline struct nf_conn *
@@ -189,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
*/
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 96ba5f7dcab6..8fdb04b8cce0 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -23,12 +23,17 @@ struct nf_conntrack_ecache {
static inline struct nf_conntrack_ecache *
nf_ct_ecache_find(const struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+ return NULL;
+#endif
}
static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
@@ -45,6 +50,9 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
e->expmask = expmask;
}
return e;
+#else
+ return NULL;
+#endif
};
#ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
@@ -159,7 +167,7 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 0772d296dfdb..2dcf31703acb 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -7,10 +7,19 @@
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
+#endif
NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_CT_EXT_TSTAMP,
+#endif
NF_CT_EXT_NUM,
};
@@ -19,6 +28,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c305dbdab6..f1c1311adc2c 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -63,4 +63,10 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
extern int nf_conntrack_helper_init(void);
extern void nf_conntrack_helper_fini(void);
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
+
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index a7547611e8f1..e8010f445ae1 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -73,7 +73,7 @@ struct nf_conntrack_l3proto {
struct module *me;
};
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
/* Protocol registration. */
extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 000000000000..fc9c82b1f06b
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ struct net *net = nf_ct_net(ct);
+
+ if (!net->ct.sysctl_tstamp)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+ return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+ return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+ net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
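/*
 * Editor's sketch (not part of the patch): the intended usage pattern for
 * the timestamp extension declared above. The wrapper function name is
 * hypothetical.
 */
#include <linux/ktime.h>
#include <net/netfilter/nf_conntrack_timestamp.h>

static void example_record_start(struct nf_conn *ct, gfp_t gfp)
{
	struct nf_conn_tstamp *tstamp;

	/* Returns NULL when CONFIG_NF_CONNTRACK_TIMESTAMP or the sysctl is off. */
	tstamp = nf_ct_tstamp_ext_add(ct, gfp);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());
}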
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index f5f09f032a90..aff80b190c12 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -56,7 +56,9 @@ struct nf_nat_multi_range_compat {
/* per conntrack: nat application helper private data */
union nf_conntrack_nat_help {
/* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
struct nf_nat_pptp nat_pptp_info;
+#endif
};
struct nf_conn;
@@ -84,7 +86,11 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+ return NULL;
+#endif
}
#else /* !__KERNEL__: iptables wants this to compile. */
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 33602ab66190..3dc7b98effeb 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == IP_NAT_MANIP_SRC)
- return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_SRC_NAT_DONE;
else
- return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_DST_NAT_DONE;
}
struct nlattr;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 373f1a900cf4..8a3906a08f5f 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
#define NLA_PUT_BE16(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be16, attrtype, value)
+#define NLA_PUT_NET16(skb, attrtype, value) \
+ NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u32, attrtype, value)
#define NLA_PUT_BE32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be32, attrtype, value)
+#define NLA_PUT_NET32(skb, attrtype, value) \
+ NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u64, attrtype, value)
#define NLA_PUT_BE64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be64, attrtype, value)
+#define NLA_PUT_NET64(skb, attrtype, value) \
+ NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_STRING(skb, attrtype, value) \
NLA_PUT(skb, attrtype, strlen(value) + 1, value)
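/*
 * Editor's sketch (not part of the patch): how the new NLA_PUT_NET*
 * wrappers are meant to be used. The attribute type value and the
 * function name are hypothetical.
 */
#include <net/netlink.h>

static int example_put_addr(struct sk_buff *skb, __be32 addr)
{
	/* Emits a big-endian u32 attribute with NLA_F_NET_BYTEORDER set. */
	NLA_PUT_NET32(skb, 1 /* hypothetical attribute type */, addr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}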
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index d4958d4c6574..341eb089349e 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -21,15 +21,15 @@ struct netns_ct {
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
+ int sysctl_tstamp;
int sysctl_checksum;
unsigned int sysctl_log_invalid; /* Log invalid packets */
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
+ struct ctl_table_header *tstamp_sysctl_header;
struct ctl_table_header *event_sysctl_header;
#endif
- int hash_vmalloc;
- int expect_vmalloc;
char *slabname;
};
#endif
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
new file mode 100644
index 000000000000..259ebac904bf
--- /dev/null
+++ b/include/net/netns/ip_vs.h
@@ -0,0 +1,143 @@
+/*
+ * IP Virtual Server
+ * Data structure for network namespace
+ *
+ */
+
+#ifndef IP_VS_H_
+#define IP_VS_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/list_nulls.h>
+#include <linux/ip_vs.h>
+#include <asm/atomic.h>
+#include <linux/in.h>
+
+struct ip_vs_stats;
+struct ip_vs_sync_buff;
+struct ctl_table_header;
+
+struct netns_ipvs {
+ int gen; /* Generation */
+ /*
+ * Hash table: for real service lookups
+ */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct list_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ struct mutex app_mutex;
+ struct lock_class_key app_key; /* mutex debugging */
+
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+ spinlock_t tcp_app_lock;
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+ spinlock_t udp_app_lock;
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+ spinlock_t sctp_app_lock;
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats *tot_stats; /* Statistics & est. */
+ struct ip_vs_cpu_stats __percpu *cpustats; /* Stats per cpu */
+ seqcount_t *ustats_seq; /* u64 read retry */
+
+ int num_services; /* number of virtual services */
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+ rwlock_t rs_lock; /* real services table */
+ /* semaphore for IPVS sockopts; [gs]etsockopt may sleep */
+ struct lock_class_key ctl_key; /* ctl_mutex debugging */
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ int sysctl_nat_icmp_send;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ struct list_head sync_queue;
+ spinlock_t sync_lock;
+ struct ip_vs_sync_buff *sync_buff;
+ spinlock_t sync_buff_lock;
+ struct sockaddr_in sync_mcast_addr;
+ struct task_struct *master_thread;
+ struct task_struct *backup_thread;
+ int send_mesg_maxlen;
+ int recv_mesg_maxlen;
+ volatile int sync_state;
+ volatile int master_syncid;
+ volatile int backup_syncid;
+ /* multicast interface name */
+ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ /* net namespace ptr */
+ struct net *net; /* Needed by timer routines */
+};
+
+#endif /* IP_VS_H_ */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d68c3f121774..e2e2ef57eca2 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -43,7 +43,6 @@ struct netns_ipv4 {
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
unsigned int nat_htable_size;
- int nat_vmalloced;
#endif
int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/protocol.h b/include/net/protocol.h
index dc07495bce4c..6f7eb800974a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -38,7 +38,7 @@ struct net_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index 93e10c453f6b..e5864658dc76 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -49,6 +49,7 @@
struct fib_nh;
struct inet_peer;
+struct fib_info;
struct rtable {
struct dst_entry dst;
@@ -69,6 +70,7 @@ struct rtable {
/* Miscellaneous cached information */
__be32 rt_spec_dst; /* RFC1122 specific destination */
struct inet_peer *peer; /* long-living peer info */
+ struct fib_info *fi; /* for client ref to shared metrics */
};
static inline bool rt_is_input_route(struct rtable *rt)
@@ -180,6 +182,8 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
if (!dst || !src) {
err = __ip_route_output_key(net, rp, &fl);
@@ -207,6 +211,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
fl.proto = protocol;
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
ip_rt_put(*rp);
*rp = NULL;
security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e9eee99d8b1f..16626a04cb03 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -31,10 +31,12 @@ enum qdisc_state_t {
* following bits are only changed while qdisc lock is held
*/
enum qdisc___state_t {
- __QDISC___STATE_RUNNING,
+ __QDISC___STATE_RUNNING = 1,
+ __QDISC___STATE_THROTTLED = 2,
};
struct qdisc_size_table {
+ struct rcu_head rcu;
struct list_head list;
struct tc_sizespec szopts;
int refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
struct sk_buff * (*dequeue)(struct Qdisc *dev);
unsigned flags;
#define TCQ_F_BUILTIN 1
-#define TCQ_F_THROTTLED 2
-#define TCQ_F_INGRESS 4
-#define TCQ_F_CAN_BYPASS 8
-#define TCQ_F_MQROOT 16
+#define TCQ_F_INGRESS 2
+#define TCQ_F_CAN_BYPASS 4
+#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
struct Qdisc_ops *ops;
- struct qdisc_size_table *stab;
+ struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
u32 parent;
@@ -78,25 +79,43 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned long __state;
+ unsigned int __state;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
spinlock_t busylock;
};
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
- return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ if (qdisc_is_running(qdisc))
+ return false;
+ qdisc->__state |= __QDISC___STATE_RUNNING;
+ return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+ return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+ qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+ qdisc->__state &= ~__QDISC___STATE_THROTTLED;
}
struct Qdisc_class_ops {
@@ -331,8 +350,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
- struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
@@ -411,12 +430,20 @@ enum net_xmit_qdisc_t {
#define net_xmit_drop_count(e) (1)
#endif
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
- if (sch->stab)
- qdisc_calculate_pkt_len(skb, sch->stab);
+ struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+ if (stab)
+ __qdisc_calculate_pkt_len(skb, stab);
#endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch);
}
@@ -445,7 +472,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -460,8 +486,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
{
struct sk_buff *skb = __skb_dequeue(list);
- if (likely(skb != NULL))
+ if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ }
return skb;
}
@@ -474,10 +502,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list)
{
- struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+ struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
+ sch->qstats.backlog -= len;
kfree_skb(skb);
return len;
}
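/*
 * Editor's sketch (not part of the patch): how a qdisc might use the new
 * __QDISC___STATE_THROTTLED helpers in place of the removed
 * TCQ_F_THROTTLED flag. The function name is hypothetical.
 */
#include <net/sch_generic.h>

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* Nothing to hand out while a watchdog timer is holding us back. */
	if (qdisc_is_throttled(sch))
		return NULL;

	/* qdisc_dequeue_head() now also updates backlog and bstats. */
	return qdisc_dequeue_head(sch);
}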
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 2a128c8c2718..e73ebdae323d 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -78,6 +78,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_GET_PEER_ADDR_INFO 15
#define SCTP_DELAYED_ACK_TIME 16
#define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME
+#define SCTP_DELAYED_SACK SCTP_DELAYED_ACK_TIME
#define SCTP_CONTEXT 17
#define SCTP_FRAGMENT_INTERLEAVE 18
#define SCTP_PARTIAL_DELIVERY_POINT 19 /* Set/Get partial delivery point */
diff --git a/include/net/sock.h b/include/net/sock.h
index d884d268c704..e3893a2b5d25 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -753,6 +753,8 @@ struct proto {
int level,
int optname, char __user *optval,
int __user *option);
+ int (*compat_ioctl)(struct sock *sk,
+ unsigned int cmd, unsigned long arg);
#endif
int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
@@ -1189,7 +1191,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu);
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+ call_rcu(&fp->rcu, sk_filter_release_rcu);
}
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38509f047382..7118668ad534 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+/* TCP initial congestion window */
+#define TCP_INIT_CWND 10
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -799,15 +802,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
- return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
@@ -1404,7 +1398,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/udp.h b/include/net/udp.h
index bb967dd59bf7..e82f3a8c0f8f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -245,5 +245,5 @@ extern void udp4_proc_exit(void);
extern void udp_init(void);
extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
#endif /* _UDP_H */
diff --git a/init/Kconfig b/init/Kconfig
index 4e337906016e..be788c0957d4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -745,8 +745,8 @@ config DEBUG_BLK_CGROUP
endif # CGROUPS
menuconfig NAMESPACES
- bool "Namespaces support" if EMBEDDED
- default !EMBEDDED
+ bool "Namespaces support" if EXPERT
+ default !EXPERT
help
Provides the way to make tasks work with different objects using
the same id. For example same IPC id may refer to different objects
@@ -899,23 +899,31 @@ config SYSCTL
config ANON_INODES
bool
-menuconfig EMBEDDED
- bool "Configure standard kernel features (for small systems)"
+menuconfig EXPERT
+ bool "Configure standard kernel features (expert users)"
help
This option allows certain base kernel options and settings
to be disabled or tweaked. This is for specialized
environments which can tolerate a "non-standard" kernel.
Only use this if you really know what you are doing.
+config EMBEDDED
+ bool "Embedded system"
+ select EXPERT
+ help
+ This option should be enabled when compiling the kernel for
+ an embedded system, so that certain expert options are
+ available for configuration.
+
config UID16
- bool "Enable 16-bit UID system calls" if EMBEDDED
+ bool "Enable 16-bit UID system calls" if EXPERT
depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
default y
help
This enables the legacy 16-bit UID syscall wrappers.
config SYSCTL_SYSCALL
- bool "Sysctl syscall support" if EMBEDDED
+ bool "Sysctl syscall support" if EXPERT
depends on PROC_SYSCTL
default y
select SYSCTL
@@ -932,7 +940,7 @@ config SYSCTL_SYSCALL
If unsure say Y here.
config KALLSYMS
- bool "Load all symbols for debugging/ksymoops" if EMBEDDED
+ bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
help
Say Y here to let the kernel print out symbolic crash information and
@@ -963,7 +971,7 @@ config KALLSYMS_EXTRA_PASS
config HOTPLUG
- bool "Support for hot-pluggable devices" if EMBEDDED
+ bool "Support for hot-pluggable devices" if EXPERT
default y
help
This option is provided for the case where no hotplug or uevent
@@ -973,7 +981,7 @@ config HOTPLUG
config PRINTK
default y
- bool "Enable support for printk" if EMBEDDED
+ bool "Enable support for printk" if EXPERT
help
This option enables normal printk support. Removing it
eliminates most of the message strings from the kernel image
@@ -982,7 +990,7 @@ config PRINTK
strongly discouraged.
config BUG
- bool "BUG() support" if EMBEDDED
+ bool "BUG() support" if EXPERT
default y
help
Disabling this option eliminates support for BUG and WARN, reducing
@@ -993,12 +1001,12 @@ config BUG
config ELF_CORE
default y
- bool "Enable ELF core dumps" if EMBEDDED
+ bool "Enable ELF core dumps" if EXPERT
help
Enable support for generating core dumps. Disabling saves about 4k.
config PCSPKR_PLATFORM
- bool "Enable PC-Speaker support" if EMBEDDED
+ bool "Enable PC-Speaker support" if EXPERT
depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES
default y
help
@@ -1007,14 +1015,14 @@ config PCSPKR_PLATFORM
config BASE_FULL
default y
- bool "Enable full-sized data structures for core" if EMBEDDED
+ bool "Enable full-sized data structures for core" if EXPERT
help
Disabling this option reduces the size of miscellaneous core
kernel data structures. This saves memory on small machines,
but may reduce performance.
config FUTEX
- bool "Enable futex support" if EMBEDDED
+ bool "Enable futex support" if EXPERT
default y
select RT_MUTEXES
help
@@ -1023,7 +1031,7 @@ config FUTEX
run glibc-based applications correctly.
config EPOLL
- bool "Enable eventpoll support" if EMBEDDED
+ bool "Enable eventpoll support" if EXPERT
default y
select ANON_INODES
help
@@ -1031,7 +1039,7 @@ config EPOLL
support for epoll family of system calls.
config SIGNALFD
- bool "Enable signalfd() system call" if EMBEDDED
+ bool "Enable signalfd() system call" if EXPERT
select ANON_INODES
default y
help
@@ -1041,7 +1049,7 @@ config SIGNALFD
If unsure, say Y.
config TIMERFD
- bool "Enable timerfd() system call" if EMBEDDED
+ bool "Enable timerfd() system call" if EXPERT
select ANON_INODES
default y
help
@@ -1051,7 +1059,7 @@ config TIMERFD
If unsure, say Y.
config EVENTFD
- bool "Enable eventfd() system call" if EMBEDDED
+ bool "Enable eventfd() system call" if EXPERT
select ANON_INODES
default y
help
@@ -1061,7 +1069,7 @@ config EVENTFD
If unsure, say Y.
config SHMEM
- bool "Use full shmem filesystem" if EMBEDDED
+ bool "Use full shmem filesystem" if EXPERT
default y
depends on MMU
help
@@ -1072,7 +1080,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
- bool "Enable AIO support" if EMBEDDED
+ bool "Enable AIO support" if EXPERT
default y
help
This option enables POSIX asynchronous I/O which may by used
@@ -1149,16 +1157,16 @@ endmenu
config VM_EVENT_COUNTERS
default y
- bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
+ bool "Enable VM event counters for /proc/vmstat" if EXPERT
help
VM event counters are needed for event counts to be shown.
This option allows the disabling of the VM event counters
- on EMBEDDED systems. /proc/vmstat will only show page counts
+ on EXPERT systems. /proc/vmstat will only show page counts
if VM event counters are disabled.
config PCI_QUIRKS
default y
- bool "Enable PCI quirk workarounds" if EMBEDDED
+ bool "Enable PCI quirk workarounds" if EXPERT
depends on PCI
help
This enables workarounds for various PCI chipset
@@ -1167,7 +1175,7 @@ config PCI_QUIRKS
config SLUB_DEBUG
default y
- bool "Enable SLUB debugging support" if EMBEDDED
+ bool "Enable SLUB debugging support" if EXPERT
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
@@ -1211,7 +1219,7 @@ config SLUB
a slab allocator.
config SLOB
- depends on EMBEDDED
+ depends on EXPERT
bool "SLOB (Simple Allocator)"
help
SLOB replaces the stock allocator with a drastically simpler
@@ -1222,7 +1230,7 @@ endchoice
config MMAP_ALLOW_UNINITIALIZED
bool "Allow mmapped anonymous memory to be uninitialized"
- depends on EMBEDDED && !MMU
+ depends on EXPERT && !MMU
default n
help
Normally, and according to the Linux spec, anonymous memory obtained
diff --git a/init/main.c b/init/main.c
index 00799c1d4628..33c37c379e96 100644
--- a/init/main.c
+++ b/init/main.c
@@ -96,6 +96,15 @@ static inline void mark_rodata_ro(void) { }
extern void tc_init(void);
#endif
+/*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQs disabled. This means
+ * two things: IRQs must not be enabled before the flag is cleared, and some
+ * operations which are normally not allowed with IRQs disabled are allowed
+ * while the flag is set.
+ */
+bool early_boot_irqs_disabled __read_mostly;
+
enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
@@ -554,7 +563,7 @@ asmlinkage void __init start_kernel(void)
cgroup_init_early();
local_irq_disable();
- early_boot_irqs_off();
+ early_boot_irqs_disabled = true;
/*
* Interrupts are still disabled. Do necessary setups, then
@@ -621,7 +630,7 @@ asmlinkage void __init start_kernel(void)
if (!irqs_disabled())
printk(KERN_CRIT "start_kernel(): bug: interrupts were "
"enabled early\n");
- early_boot_irqs_on();
+ early_boot_irqs_disabled = false;
local_irq_enable();
/* Interrupts are enabled now so all GFP allocations are safe. */
diff --git a/kernel/audit.c b/kernel/audit.c
index e4956244ae50..162e88e33bc9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -74,6 +74,8 @@ static int audit_initialized;
int audit_enabled;
int audit_ever_enabled;
+EXPORT_SYMBOL_GPL(audit_enabled);
+
/* Default state when kernel boots without any parameters. */
static int audit_default;
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 31d766bf5d2e..8e42fec7686d 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -9,9 +9,6 @@ menu "IRQ subsystem"
config GENERIC_HARDIRQS
def_bool y
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
# Select this to disable the deprecated stuff
config GENERIC_HARDIRQS_NO_DEPRECATED
def_bool n
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e2347eb63306..3540a7190122 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -118,114 +118,3 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
return retval;
}
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq: the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction *action;
- unsigned int status;
-
- kstat_incr_irqs_this_cpu(irq, desc);
-
- if (CHECK_IRQ_PER_CPU(desc->status)) {
- irqreturn_t action_ret;
-
- /*
- * No locking required for CPU-local interrupts:
- */
- if (desc->irq_data.chip->ack)
- desc->irq_data.chip->ack(irq);
- if (likely(!(desc->status & IRQ_DISABLED))) {
- action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
- }
- desc->irq_data.chip->end(irq);
- return 1;
- }
-
- raw_spin_lock(&desc->lock);
- if (desc->irq_data.chip->ack)
- desc->irq_data.chip->ack(irq);
- /*
- * REPLAY is when Linux resends an IRQ that was dropped earlier
- * WAITING is used by probe to mark irqs that are being tested
- */
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
- status |= IRQ_PENDING; /* we _want_ to handle it */
-
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
-
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- * Since we set PENDING, if another processor is handling
- * a different instance of this same irq, the other processor
- * will take care of it.
- */
- if (unlikely(!action))
- goto out;
-
- /*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- irqreturn_t action_ret;
-
- raw_spin_unlock(&desc->lock);
-
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
-
- raw_spin_lock(&desc->lock);
- if (likely(!(desc->status & IRQ_PENDING)))
- break;
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
-
-out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->irq_data.chip->end(irq);
- raw_spin_unlock(&desc->lock);
-
- return 1;
-}
-#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..0587c5ceaed8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -134,6 +134,10 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
irq_set_thread_affinity(desc);
}
#endif
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
+ schedule_work(&desc->affinity_notify->work);
+ }
desc->status |= IRQ_AFFINITY_SET;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
@@ -155,6 +159,79 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+
+ if (!desc)
+ goto out;
+
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (desc->status & IRQ_MOVE_PENDING)
+ cpumask_copy(cpumask, desc->pending_mask);
+ else
+#endif
+ cpumask_copy(cpumask, desc->affinity);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ notify->notify(notify, cpumask);
+
+ free_cpumask_var(cpumask);
+out:
+ kref_put(&notify->kref, notify->release);
+}
+
+/**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+ * @notify: Context for notification, or %NULL to disable
+ * notification. Function pointers must be initialised;
+ * the other fields will be initialised by this function.
+ *
+ * Must be called in process context. Notification may only be enabled
+ * after the IRQ is allocated and must be disabled before the IRQ is
+ * freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_affinity_notify *old_notify;
+ unsigned long flags;
+
+ /* The release function is promised process context */
+ might_sleep();
+
+ if (!desc)
+ return -EINVAL;
+
+ /* Complete initialisation of *notify */
+ if (notify) {
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ old_notify = desc->affinity_notify;
+ desc->affinity_notify = notify;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ if (old_notify)
+ kref_put(&old_notify->kref, old_notify->release);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
@@ -1004,6 +1081,11 @@ void free_irq(unsigned int irq, void *dev_id)
if (!desc)
return;
+#ifdef CONFIG_SMP
+ if (WARN_ON(desc->affinity_notify))
+ desc->affinity_notify = NULL;
+#endif
+
chip_bus_lock(desc);
kfree(__free_irq(irq, dev_id));
chip_bus_sync_unlock(desc);
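
The irq_set_affinity_notifier() API added above lets a consumer schedule work whenever an IRQ's affinity changes: the caller supplies the notify() and release() callbacks, and everything else (irq, kref, work) is filled in by the function. A minimal, hedged sketch of driver-side use follows; the my_irq_ctx structure and function names are illustrative assumptions, only the irq_affinity_notify fields and irq_set_affinity_notifier() itself come from the patch.

/* Sketch: registering an IRQ affinity notifier (illustrative driver glue). */
#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_irq_ctx {
	struct irq_affinity_notify notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* Runs from a workqueue with the current effective affinity. */
	pr_info("irq %u affinity changed\n", notify->irq);
}

static void my_affinity_release(struct kref *ref)
{
	struct my_irq_ctx *ctx =
		container_of(ref, struct my_irq_ctx, notify.kref);

	kfree(ctx);	/* release is promised process context, so kfree() is fine */
}

static int my_register_notifier(unsigned int irq)
{
	struct my_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->notify.notify = my_affinity_notify;
	ctx->notify.release = my_affinity_release;
	/* Only after the IRQ is allocated; pass NULL again before free_irq(). */
	return irq_set_affinity_notifier(irq, &ctx->notify);
}
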
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 42ba65dff7d9..0d2058da80f5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2292,22 +2292,6 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
}
/*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
- early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
- early_boot_irqs_enabled = 1;
-}
-
-/*
* Hardirqs will be enabled:
*/
void trace_hardirqs_on_caller(unsigned long ip)
@@ -2319,7 +2303,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
- if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+ if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
return;
if (unlikely(curr->hardirqs_enabled)) {
diff --git a/kernel/params.c b/kernel/params.c
index 08107d181758..0da1411222b9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -719,9 +719,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
params[i].ops->free(params[i].arg);
}
-static void __init kernel_add_sysfs_param(const char *name,
- struct kernel_param *kparam,
- unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
{
struct module_kobject *mk;
struct kobject *kobj;
@@ -729,10 +727,7 @@ static void __init kernel_add_sysfs_param(const char *name,
kobj = kset_find_obj(module_kset, name);
if (kobj) {
- /* We already have one. Remove params so we can add more. */
mk = to_module_kobject(kobj);
- /* We need to remove it before adding parameters. */
- sysfs_remove_group(&mk->kobj, &mk->mp->grp);
} else {
mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
BUG_ON(!mk);
@@ -743,15 +738,36 @@ static void __init kernel_add_sysfs_param(const char *name,
"%s", name);
if (err) {
kobject_put(&mk->kobj);
- printk(KERN_ERR "Module '%s' failed add to sysfs, "
- "error number %d\n", name, err);
- printk(KERN_ERR "The system will be unstable now.\n");
- return;
+ printk(KERN_ERR
+ "Module '%s' failed add to sysfs, error number %d\n",
+ name, err);
+ printk(KERN_ERR
+ "The system will be unstable now.\n");
+ return NULL;
}
- /* So that exit path is even. */
+
+ /* So that we hold a reference in both cases. */
kobject_get(&mk->kobj);
}
+ return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+ struct kernel_param *kparam,
+ unsigned int name_skip)
+{
+ struct module_kobject *mk;
+ int err;
+
+ mk = locate_module_kobject(name);
+ if (!mk)
+ return;
+
+ /* We need to remove old parameters before adding more. */
+ if (mk->mp)
+ sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
/* These should not fail at boot. */
err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
BUG_ON(err);
@@ -796,6 +812,32 @@ static void __init param_sysfs_builtin(void)
}
}
+ssize_t __modver_version_show(struct module_attribute *mattr,
+ struct module *mod, char *buf)
+{
+ struct module_version_attribute *vattr =
+ container_of(mattr, struct module_version_attribute, mattr);
+
+ return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+ const struct module_version_attribute *vattr;
+ struct module_kobject *mk;
+ int err;
+
+ for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+ mk = locate_module_kobject(vattr->module_name);
+ if (mk) {
+ err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+ kobject_uevent(&mk->kobj, KOBJ_ADD);
+ kobject_put(&mk->kobj);
+ }
+ }
+}
/* module-related sysfs stuff */
@@ -875,6 +917,7 @@ static int __init param_sysfs_init(void)
}
module_sysfs_initialized = 1;
+ version_sysfs_builtin();
param_sysfs_builtin();
return 0;
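
The version_sysfs_builtin() loop above publishes MODULE_VERSION() strings of built-in code under sysfs by walking the __modver section. A hedged sketch of the producer side, assuming the companion module.h change (not shown in this hunk) makes MODULE_VERSION() emit a struct module_version_attribute into that section when the code is built in:

/* Sketch: a built-in driver exporting its version string. */
#include <linux/module.h>

MODULE_VERSION("1.2.3");
/* After boot, assuming the driver is named "mydrv" and built in:
 *   cat /sys/module/mydrv/version   ->   1.2.3
 */
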
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 84522c796987..126a302c481c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2201,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid)
if (!task)
return ERR_PTR(-ESRCH);
- /*
- * Can't attach events to a dying task.
- */
- err = -ESRCH;
- if (task->flags & PF_EXITING)
- goto errout;
-
/* Reuse ptrace permission checks for now. */
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2261,27 @@ retry:
get_ctx(ctx);
- if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
- /*
- * We raced with some other task; use
- * the context they set.
- */
+ err = 0;
+ mutex_lock(&task->perf_event_mutex);
+ /*
+ * If it has already passed perf_event_exit_task(),
+ * we must see PF_EXITING; it takes this mutex too.
+ */
+ if (task->flags & PF_EXITING)
+ err = -ESRCH;
+ else if (task->perf_event_ctxp[ctxn])
+ err = -EAGAIN;
+ else
+ rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+ mutex_unlock(&task->perf_event_mutex);
+
+ if (unlikely(err)) {
put_task_struct(task);
kfree(ctx);
- goto retry;
+
+ if (err == -EAGAIN)
+ goto retry;
+ goto errout;
}
}
@@ -5374,6 +5380,8 @@ free_dev:
goto out;
}
+static struct lock_class_key cpuctx_mutex;
+
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
int cpu, ret;
@@ -5422,6 +5430,7 @@ skip_type:
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
+ lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
cpuctx->jiffies_interval = 1;
@@ -6127,7 +6136,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
* scheduled, so we are now safe from rescheduling changing
* our context.
*/
- child_ctx = child->perf_event_ctxp[ctxn];
+ child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
task_ctx_sched_out(child_ctx, EVENT_ALL);
/*
@@ -6440,11 +6449,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
unsigned long flags;
int ret = 0;
- child->perf_event_ctxp[ctxn] = NULL;
-
- mutex_init(&child->perf_event_mutex);
- INIT_LIST_HEAD(&child->perf_event_list);
-
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
@@ -6533,6 +6537,10 @@ int perf_event_init_task(struct task_struct *child)
{
int ctxn, ret;
+ memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+ mutex_init(&child->perf_event_mutex);
+ INIT_LIST_HEAD(&child->perf_event_list);
+
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
if (ret)
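
The hunk above replaces a lock-free cmpxchg() publication of the task context with publication under task->perf_event_mutex, so a concurrent perf_event_exit_task() (which takes the same mutex and runs with PF_EXITING set) can no longer be missed. A stripped-down sketch of the pattern, with a hypothetical helper name; the body mirrors the code added above:

/* Sketch: publish a per-task context under the mutex shared with the exit path. */
static int publish_task_ctx(struct task_struct *task, int ctxn,
			    struct perf_event_context *ctx)
{
	int err = 0;

	mutex_lock(&task->perf_event_mutex);
	if (task->flags & PF_EXITING)		/* exit path already ran or is running */
		err = -ESRCH;
	else if (task->perf_event_ctxp[ctxn])	/* lost the race, caller retries */
		err = -EAGAIN;
	else
		rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
	mutex_unlock(&task->perf_event_mutex);

	return err;
}
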
diff --git a/kernel/sched.c b/kernel/sched.c
index ea3e5eff3878..18d38e4ec7ba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@ struct rq {
/* try_to_wake_up() stats */
unsigned int ttwu_count;
unsigned int ttwu_local;
-
- /* BKL stats */
- unsigned int bkl_count;
#endif
};
@@ -609,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p)
struct task_group *tg;
struct cgroup_subsys_state *css;
+ if (p->flags & PF_EXITING)
+ return &root_task_group;
+
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
tg = container_of(css, struct task_group, css);
@@ -3887,7 +3887,7 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq(), sched_count);
#ifdef CONFIG_SCHEDSTATS
if (unlikely(prev->lock_depth >= 0)) {
- schedstat_inc(this_rq(), bkl_count);
+ schedstat_inc(this_rq(), rq_sched_info.bkl_count);
schedstat_inc(prev, sched_info.bkl_count);
}
#endif
@@ -4871,7 +4871,8 @@ recheck:
* assigned.
*/
if (rt_bandwidth_enabled() && rt_policy(policy) &&
- task_group(p)->rt_bandwidth.rt_runtime == 0) {
+ task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+ !task_group_is_autogroup(task_group(p))) {
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return -EPERM;
@@ -8882,6 +8883,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
}
}
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+ /*
+ * cgroup_exit() is called in the copy_process() failure path.
+ * Ignore this case since the task hasn't run yet; this avoids
+ * trying to poke a half-freed task state from generic code.
+ */
+ if (!(task->flags & PF_EXITING))
+ return;
+
+ sched_move_task(task);
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
u64 shareval)
@@ -8954,6 +8969,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.destroy = cpu_cgroup_destroy,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
+ .exit = cpu_cgroup_exit,
.populate = cpu_cgroup_populate,
.subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 32a723b8f84c..9fb656283157 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref)
{
struct autogroup *ag = container_of(kref, struct autogroup, kref);
+#ifdef CONFIG_RT_GROUP_SCHED
+ /* We've redirected RT tasks to the root task group... */
+ ag->tg->rt_se = NULL;
+ ag->tg->rt_rq = NULL;
+#endif
sched_destroy_group(ag->tg);
}
@@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
return ag;
}
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
static inline struct autogroup *autogroup_create(void)
{
struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void)
init_rwsem(&ag->lock);
ag->id = atomic_inc_return(&autogroup_seq_nr);
ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Autogroup RT tasks are redirected to the root task group
+ * so we don't have to move tasks around upon policy change,
+ * or flail around trying to allocate bandwidth on the fly.
+ * A bandwidth exception in __sched_setscheduler() allows
+ * the policy change to proceed. Thereafter, task_group()
+ * returns &root_task_group, so zero bandwidth is required.
+ */
+ free_rt_sched_group(tg);
+ tg->rt_se = root_task_group.rt_se;
+ tg->rt_rq = root_task_group.rt_rq;
+#endif
tg->autogroup = ag;
return ag;
@@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
return true;
}
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+ return tg != &root_task_group && tg->autogroup;
+}
+
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
@@ -231,6 +258,11 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
+ int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+ if (!enabled || !tg->autogroup)
+ return 0;
+
return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 5358e241cb20..7b859ffe5dad 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -15,6 +15,10 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg);
static inline void autogroup_init(struct task_struct *init_task) { }
static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+ return 0;
+}
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1dfae3d014b5..eb6cb8edd075 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -16,6 +16,8 @@
#include <linux/kallsyms.h>
#include <linux/utsname.h>
+static DEFINE_SPINLOCK(sched_debug_lock);
+
/*
* This allows printing both to /proc/sched_debug and
* to the console
@@ -86,6 +88,26 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
}
#endif
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+ if (autogroup_path(tg, group_path, PATH_MAX))
+ return group_path;
+
+ /*
+ * May be NULL if the underlying cgroup isn't fully-created yet
+ */
+ if (!tg->css.cgroup) {
+ group_path[0] = '\0';
+ return group_path;
+ }
+ cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+ return group_path;
+}
+#endif
+
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
@@ -108,6 +130,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
+#ifdef CONFIG_CGROUP_SCHED
+ SEQ_printf(m, " %s", task_group_path(task_group(p)));
+#endif
SEQ_printf(m, "\n");
}
@@ -144,7 +169,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
struct sched_entity *last;
unsigned long flags;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+#else
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
@@ -191,7 +220,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
+#ifdef CONFIG_RT_GROUP_SCHED
+ SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+#else
SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
#define P(x) \
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
@@ -212,6 +245,7 @@ extern __read_mostly int sched_clock_running;
static void print_cpu(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
#ifdef CONFIG_X86
{
@@ -262,14 +296,20 @@ static void print_cpu(struct seq_file *m, int cpu)
P(ttwu_count);
P(ttwu_local);
- P(bkl_count);
+ SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
+ rq->rq_sched_info.bkl_count);
#undef P
+#undef P64
#endif
+ spin_lock_irqsave(&sched_debug_lock, flags);
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
+ rcu_read_lock();
print_rq(m, rq, cpu);
+ rcu_read_unlock();
+ spin_unlock_irqrestore(&sched_debug_lock, flags);
}
static const char *sched_tunable_scaling_names[] = {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..354769979c02 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->nr_running--;
}
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
int global_update)
{
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+ long weight_delta)
+{
+ long load_weight, load, shares;
+
+ load = cfs_rq->load.weight + weight_delta;
+
+ load_weight = atomic_read(&tg->load_weight);
+ load_weight -= cfs_rq->load_contribution;
+ load_weight += load;
+
+ shares = (tg->shares * load);
+ if (load_weight)
+ shares /= load_weight;
+
+ if (shares < MIN_SHARES)
+ shares = MIN_SHARES;
+ if (shares > tg->shares)
+ shares = tg->shares;
+
+ return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+ if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+ update_cfs_load(cfs_rq, 0);
+ update_cfs_shares(cfs_rq, 0);
+ }
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+ long weight_delta)
+{
+ return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
@@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
struct task_group *tg;
struct sched_entity *se;
- long load_weight, load, shares;
+ long shares;
if (!cfs_rq)
return;
@@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
se = tg->se[cpu_of(rq_of(cfs_rq))];
if (!se)
return;
-
- load = cfs_rq->load.weight + weight_delta;
-
- load_weight = atomic_read(&tg->load_weight);
- load_weight -= cfs_rq->load_contribution;
- load_weight += load;
-
- shares = (tg->shares * load);
- if (load_weight)
- shares /= load_weight;
-
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- if (shares > tg->shares)
- shares = tg->shares;
+#ifndef CONFIG_SMP
+ if (likely(se->load.weight == tg->shares))
+ return;
+#endif
+ shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
reweight_entity(cfs_rq_of(se), se, shares);
}
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
- if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, 0);
- }
-}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
@@ -1062,6 +1090,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
struct sched_entity *se = __pick_next_entity(cfs_rq);
s64 delta = curr->vruntime - se->vruntime;
+ if (delta < 0)
+ return;
+
if (delta > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
}
@@ -1362,27 +1393,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
return wl;
for_each_sched_entity(se) {
- long S, rw, s, a, b;
+ long lw, w;
- S = se->my_q->tg->shares;
- s = se->load.weight;
- rw = se->my_q->load.weight;
+ tg = se->my_q->tg;
+ w = se->my_q->load.weight;
- a = S*(rw + wl);
- b = S*rw + s*wg;
+ /* use this cpu's instantaneous contribution */
+ lw = atomic_read(&tg->load_weight);
+ lw -= se->my_q->load_contribution;
+ lw += w + wg;
- wl = s*(a-b);
+ wl += w;
- if (likely(b))
- wl /= b;
+ if (lw > 0 && wl < lw)
+ wl = (wl * tg->shares) / lw;
+ else
+ wl = tg->shares;
- /*
- * Assume the group is already running and will
- * thus already be accounted for in the weight.
- *
- * That is, moving shares between CPUs, does not
- * alter the group weight.
- */
+ /* zero point is MIN_SHARES */
+ if (wl < MIN_SHARES)
+ wl = MIN_SHARES;
+ wl -= se->load.weight;
wg = 0;
}
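
The calc_cfs_shares() helper introduced above reduces to shares = tg->shares * cfs_rq_load / group_load_weight, where the group load has this cpu's stale contribution swapped for its current load, and the result is clamped to [MIN_SHARES, tg->shares]. A small standalone sketch of that arithmetic (the MIN_SHARES value here is only an assumption for illustration):

/* Sketch: the per-cpu share calculation performed by calc_cfs_shares(). */
#include <stdio.h>

#define MIN_SHARES 2L	/* illustrative value, not necessarily the kernel's */

static long calc_shares(long tg_shares, long tg_load_weight,
			long load_contribution, long cfs_load, long weight_delta)
{
	long load = cfs_load + weight_delta;
	long load_weight = tg_load_weight - load_contribution + load;
	long shares = tg_shares * load;

	if (load_weight)
		shares /= load_weight;
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* this cpu carries half of the group's load -> it gets half the shares */
	printf("%ld\n", calc_shares(1024, 2048, 1024, 1024, 0));	/* prints 512 */
	return 0;
}
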
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e069987..9910744f0856 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
*/
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
+ void (*func) (void *info);
- if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+ /*
+ * Since we walk the list without any locks, we might
+ * see an entry that was completed, removed from the
+ * list and is in the process of being reused.
+ *
+ * We must check that the cpu is in the cpumask before
+ * checking the refs, and both must be set before
+ * executing the callback on this cpu.
+ */
+
+ if (!cpumask_test_cpu(cpu, data->cpumask))
+ continue;
+
+ smp_rmb();
+
+ if (atomic_read(&data->refs) == 0)
continue;
+ func = data->csd.func; /* for later warn */
data->csd.func(data->csd.info);
+ /*
+ * If the cpu mask is no longer set, the callback enabled interrupts,
+ * we took another smp interrupt, and executed the function
+ * twice on this cpu. In theory that copy decremented refs.
+ */
+ if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+ WARN(1, "%pS enabled interrupts and double executed\n",
+ func);
+ continue;
+ }
+
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
- if (!refs) {
- raw_spin_lock(&call_function.lock);
- list_del_rcu(&data->csd.list);
- raw_spin_unlock(&call_function.lock);
- }
if (refs)
continue;
+ WARN_ON(!cpumask_empty(data->cpumask));
+
+ raw_spin_lock(&call_function.lock);
+ list_del_rcu(&data->csd.list);
+ raw_spin_unlock(&call_function.lock);
+
csd_unlock(&data->csd);
}
@@ -430,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
* can't happen.
*/
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
- && !oops_in_progress);
+ && !oops_in_progress && !early_boot_irqs_disabled);
/* So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
+ BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
+
+ /*
+ * To ensure the interrupt handler gets a complete view
+ * we order the cpumask and refs writes and order the read
+ * of them in the interrupt handler. In addition we may
+ * only clear our own cpu bit from the mask.
+ */
+ smp_wmb();
+
atomic_set(&data->refs, cpumask_weight(data->cpumask));
raw_spin_lock_irqsave(&call_function.lock, flags);
@@ -533,17 +572,20 @@ void ipi_call_unlock_irq(void)
#endif /* USE_GENERIC_SMP_HELPERS */
/*
- * Call a function on all processors
+ * Call a function on all processors. May be used during early boot while
+ * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
*/
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
+ unsigned long flags;
int ret = 0;
preempt_disable();
ret = smp_call_function(func, info, wait);
- local_irq_disable();
+ local_irq_save(flags);
func(info);
- local_irq_enable();
+ local_irq_restore(flags);
preempt_enable();
return ret;
}
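
The barrier comments added above describe a small publish/consume protocol: the sender makes func, info and the cpumask visible before refs (smp_wmb()), and each receiving cpu checks the cpumask before it trusts refs (smp_rmb()), so a reused list entry is never executed with stale data. A condensed sketch of that pairing with illustrative names; it is not the kernel's actual data structure:

/* Sketch: the smp_wmb()/smp_rmb() pairing used by the call-function path. */
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

struct call_data {
	void (*func)(void *info);
	void *info;
	struct cpumask cpumask;
	atomic_t refs;
};

static void publish(struct call_data *d, void (*func)(void *), void *info,
		    const struct cpumask *targets)
{
	d->func = func;
	d->info = info;
	cpumask_copy(&d->cpumask, targets);
	smp_wmb();			/* func/info/cpumask visible before refs */
	atomic_set(&d->refs, cpumask_weight(&d->cpumask));
}

static void consume(struct call_data *d, int cpu)
{
	if (!cpumask_test_cpu(cpu, &d->cpumask))
		return;			/* not for us, or entry being reused */
	smp_rmb();			/* read refs only after the cpumask test */
	if (atomic_read(&d->refs) == 0)
		return;			/* refs not published yet: skip */
	d->func(d->info);
	cpumask_clear_cpu(cpu, &d->cpumask);
	atomic_dec(&d->refs);
}
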
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e216e01bbd1..c55ea2433471 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,8 +642,7 @@ static void tick_nohz_switch_to_nohz(void)
}
local_irq_enable();
- printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
- smp_processor_id());
+ printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
}
/*
@@ -795,8 +794,10 @@ void tick_setup_sched_timer(void)
}
#ifdef CONFIG_NO_HZ
- if (tick_nohz_enabled)
+ if (tick_nohz_enabled) {
ts->nohz_mode = NOHZ_MODE_HIGHRES;
+ printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+ }
#endif
}
#endif /* HIGH_RES_TIMERS */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5cf8c602b880..92b6e1e12d98 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -453,14 +453,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
* Stubs:
*/
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
void trace_softirqs_on(unsigned long ip)
{
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..11869faa6819 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
worker->flags &= ~flags;
- /* if transitioning out of NOT_RUNNING, increment nr_running */
+ /*
+ * If transitioning out of NOT_RUNNING, increment nr_running. Note
+ * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
+ * of multiple flags, not a single flag.
+ */
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock)
spin_unlock_irq(&gcwq->lock);
work_clear_pending(work);
- lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
f(work);
@@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
insert_wq_barrier(cwq, barr, work, worker);
spin_unlock_irq(&gcwq->lock);
- lock_map_acquire(&cwq->wq->lockdep_map);
+ /*
+ * If @max_active is 1 or rescuer is in use, flushing another work
+ * item on the same workqueue may lead to deadlock. Make sure the
+ * flusher is not running on the same workqueue by verifying write
+ * access.
+ */
+ if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ else
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_release(&cwq->wq->lockdep_map);
+
return true;
already_gone:
spin_unlock_irq(&gcwq->lock);
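
The read-vs-write lockdep acquisition above exists because flushing one work item from another item on the same max_active == 1 (or rescuer-backed) workqueue can deadlock: the flusher occupies the only execution slot the flushed work needs. A hedged sketch of the problematic pattern the annotation is meant to catch; the workqueue and work names are made up:

/* Sketch: the self-flush pattern the new lockdep annotation flags. */
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* created with max_active == 1 */
static struct work_struct work_a, work_b;

static void work_a_fn(struct work_struct *w)
{
	/*
	 * work_b is queued on the same max_active == 1 workqueue, so it
	 * cannot start until work_a finishes -- but work_a waits for it
	 * here.  With the patch, lockdep reports this ordering.
	 */
	flush_work(&work_b);
}

static void work_b_fn(struct work_struct *w) { }

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 1);
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&work_a, work_a_fn);
	INIT_WORK(&work_b, work_b_fn);
	queue_work(my_wq, &work_a);
	queue_work(my_wq, &work_b);
	return 0;
}
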
diff --git a/lib/Kconfig b/lib/Kconfig
index 0ee67e08ad3e..8334342e0d05 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
depends on EXPERIMENTAL && BROKEN
+config CPU_RMAP
+ bool
+ depends on SMP
+
#
# Netlink attribute parsing support is select'ed if needed
#
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d05adb98401..3967c2356e37 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -657,7 +657,7 @@ config DEBUG_HIGHMEM
Disable for production systems.
config DEBUG_BUGVERBOSE
- bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+ bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
@@ -729,8 +729,8 @@ config DEBUG_WRITECOUNT
If unsure, say N.
config DEBUG_MEMORY_INIT
- bool "Debug memory initialisation" if EMBEDDED
- default !EMBEDDED
+ bool "Debug memory initialisation" if EXPERT
+ default !EXPERT
help
Enable this for additional checks during memory initialisation.
The sanity checks verify aspects of the VM such as the memory model
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41d..b73ba01a818a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_AVERAGE) += average.o
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 000000000000..987acfafeb83
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+ struct cpu_rmap *rmap;
+ unsigned int cpu;
+ size_t obj_offset;
+
+ /* This is a silly number of objects, and we use u16 indices. */
+ if (size > 0xffff)
+ return NULL;
+
+ /* Offset of object pointer array from base structure */
+ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+ sizeof(void *));
+
+ rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+ if (!rmap)
+ return NULL;
+
+ rmap->obj = (void **)((char *)rmap + obj_offset);
+
+ /* Initially assign CPUs to objects on a rota, since we have
+ * no idea where the objects are. Use infinite distance, so
+ * any object with known distance is preferable. Include the
+ * CPUs that are not present/online, since we definitely want
+ * any newly-hotplugged CPUs to have some object assigned.
+ */
+ for_each_possible_cpu(cpu) {
+ rmap->near[cpu].index = cpu % size;
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ }
+
+ rmap->size = size;
+ return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+ const struct cpumask *mask, u16 dist)
+{
+ int neigh;
+
+ for_each_cpu(neigh, mask) {
+ if (rmap->near[cpu].dist > dist &&
+ rmap->near[neigh].dist <= dist) {
+ rmap->near[cpu].index = rmap->near[neigh].index;
+ rmap->near[cpu].dist = dist;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+ unsigned index;
+ unsigned int cpu;
+
+ pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+ for_each_possible_cpu(cpu) {
+ index = rmap->near[cpu].index;
+ pr_info("cpu %d -> obj %u (distance %u)\n",
+ cpu, index, rmap->near[cpu].dist);
+ }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+ u16 index;
+
+ BUG_ON(rmap->used >= rmap->size);
+ index = rmap->used++;
+ rmap->obj[index] = obj;
+ return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity)
+{
+ cpumask_var_t update_mask;
+ unsigned int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* Invalidate distance for all CPUs for which this used to be
+ * the nearest object. Mark those CPUs for update.
+ */
+ for_each_online_cpu(cpu) {
+ if (rmap->near[cpu].index == index) {
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ cpumask_set_cpu(cpu, update_mask);
+ }
+ }
+
+ debug_print_rmap(rmap, "after invalidating old distances");
+
+ /* Set distance to 0 for all CPUs in the new affinity mask.
+ * Mark all CPUs within their NUMA nodes for update.
+ */
+ for_each_cpu(cpu, affinity) {
+ rmap->near[cpu].index = index;
+ rmap->near[cpu].dist = 0;
+ cpumask_or(update_mask, update_mask,
+ cpumask_of_node(cpu_to_node(cpu)));
+ }
+
+ debug_print_rmap(rmap, "after updating neighbours");
+
+ /* Update distances based on topology */
+ for_each_cpu(cpu, update_mask) {
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_thread_cpumask(cpu), 1))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_core_cpumask(cpu), 2))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ cpumask_of_node(cpu_to_node(cpu)), 3))
+ continue;
+ /* We could continue into NUMA node distances, but for now
+ * we give up.
+ */
+ }
+
+ debug_print_rmap(rmap, "after copying neighbours");
+
+ free_cpumask_var(update_mask);
+ return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+ struct irq_glue *glue;
+ u16 index;
+
+ if (!rmap)
+ return;
+
+ for (index = 0; index < rmap->used; index++) {
+ glue = rmap->obj[index];
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+ irq_run_affinity_notifiers();
+
+ kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+ struct irq_glue *glue =
+ container_of(notify, struct irq_glue, notify);
+ int rc;
+
+ rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+ if (rc)
+ pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+ kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+ struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ int rc;
+
+ if (!glue)
+ return -ENOMEM;
+ glue->notify.notify = irq_cpu_rmap_notify;
+ glue->notify.release = irq_cpu_rmap_release;
+ glue->rmap = rmap;
+ glue->index = cpu_rmap_add(rmap, glue);
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc)
+ kfree(glue);
+ return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
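
Tying the pieces together: a multiqueue driver sizes one rmap to its IRQ vector count, hooks each vector with irq_cpu_rmap_add() before request_irq(), and unhooks everything with free_irq_cpu_rmap() before the IRQs are freed (this is the plumbing the RFS_ACCEL option further down selects CPU_RMAP for). A hedged sketch; the driver-side names and vector array are assumptions:

/* Sketch: driver-side use of the CPU rmap and the IRQ notifier glue. */
#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static struct cpu_rmap *my_rmap;

static int my_setup_irq_rmap(const int *irqs, unsigned int nvec)
{
	unsigned int i;
	int rc;

	my_rmap = alloc_cpu_rmap(nvec, GFP_KERNEL);
	if (!my_rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		/* hook each vector before binding it with request_irq() */
		rc = irq_cpu_rmap_add(my_rmap, irqs[i]);
		if (rc)
			goto fail;
	}
	return 0;

fail:
	free_irq_cpu_rmap(my_rmap);	/* unhooks the notifiers added so far */
	my_rmap = NULL;
	return rc;
}
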
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331b3e47..e0cc0146ae62 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
*
* INTRODUCTION
*
- * The textsearch infrastructure provides text searching facitilies for
+ * The textsearch infrastructure provides text searching facilities for
* both linear and non-linear data. Individual search algorithms are
* implemented in modules and chosen by the user.
*
@@ -43,7 +43,7 @@
* to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
- * (5) Algorithm calls get_next_block() provided by the user continously
+ * (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched in block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
* the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
* to perform case insensitive matching. But it might slow down
* performance of algorithm, so you should use it at own your risk.
- * The returned configuration may then be used for an arbitary
+ * The returned configuration may then be used for an arbitrary
* amount of times and even in parallel as long as a separate struct
* ts_state variable is provided to every instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
* implementation and calling textsearch_find(). Both functions return
- * the position of the first occurrence of the patern or UINT_MAX if
- * no match was found. Subsequent occurences can be found by calling
+ * the position of the first occurrence of the pattern or UINT_MAX if
+ * no match was found. Subsequent occurrences can be found by calling
* textsearch_next() regardless of the linearity of the data.
*
* Once you're done using a configuration it must be given back via
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index e3b6e18fdac5..60a6088d0e5e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -7,37 +7,37 @@ config XZ_DEC
CRC32 is supported. See Documentation/xz.txt for more information.
config XZ_DEC_X86
- bool "x86 BCJ filter decoder" if EMBEDDED
+ bool "x86 BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
config XZ_DEC_POWERPC
- bool "PowerPC BCJ filter decoder" if EMBEDDED
+ bool "PowerPC BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder" if EMBEDDED
+ bool "IA-64 BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
config XZ_DEC_ARM
- bool "ARM BCJ filter decoder" if EMBEDDED
+ bool "ARM BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
config XZ_DEC_ARMTHUMB
- bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+ bool "ARM-Thumb BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
config XZ_DEC_SPARC
- bool "SPARC BCJ filter decoder" if EMBEDDED
+ bool "SPARC BCJ filter decoder" if EXPERT
default y
depends on XZ_DEC
select XZ_DEC_BCJ
diff --git a/mm/compaction.c b/mm/compaction.c
index 6d592a021072..8be430b812de 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -406,6 +406,10 @@ static int compact_finished(struct zone *zone,
if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
return COMPACT_CONTINUE;
+ /*
+ * order == -1 is expected when compacting via
+ * /proc/sys/vm/compact_memory
+ */
if (cc->order == -1)
return COMPACT_CONTINUE;
@@ -454,6 +458,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
return COMPACT_SKIPPED;
/*
+ * order == -1 is expected when compacting via
+ * /proc/sys/vm/compact_memory
+ */
+ if (order == -1)
+ return COMPACT_CONTINUE;
+
+ /*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 004c9c2aac78..e187454d82f6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1203,6 +1203,8 @@ static void __split_huge_page_refcount(struct page *page)
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
+ mem_cgroup_split_huge_fixup(page, page_tail);
+
lru_add_page_tail(zone, page, page_tail);
}
@@ -1837,9 +1839,9 @@ static void collapse_huge_page(struct mm_struct *mm,
spin_lock(ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
spin_unlock(ptl);
- pte_unmap(pte);
if (unlikely(!isolated)) {
+ pte_unmap(pte);
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
set_pmd_at(mm, address, pmd, _pmd);
@@ -1856,6 +1858,7 @@ static void collapse_huge_page(struct mm_struct *mm,
anon_vma_unlock(vma->anon_vma);
__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+ pte_unmap(pte);
__SetPageUptodate(new_page);
pgtable = pmd_pgtable(_pmd);
VM_BUG_ON(page_count(pgtable) != 1);
diff --git a/mm/memblock.c b/mm/memblock.c
index 400dc62697d7..bdba245d8afd 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -683,13 +683,13 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
- int idx = memblock_search(&memblock.reserved, base);
+ int idx = memblock_search(&memblock.memory, base);
if (idx == -1)
return 0;
- return memblock.reserved.regions[idx].base <= base &&
- (memblock.reserved.regions[idx].base +
- memblock.reserved.regions[idx].size) >= (base + size);
+ return memblock.memory.regions[idx].base <= base &&
+ (memblock.memory.regions[idx].base +
+ memblock.memory.regions[idx].size) >= (base + size);
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ab841031436..db76ef726293 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -600,23 +600,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
- struct page_cgroup *pc,
- bool charge)
+ bool file, int nr_pages)
{
- int val = (charge) ? 1 : -1;
-
preempt_disable();
- if (PageCgroupCache(pc))
- __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
+ if (file)
+ __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
else
- __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
+ __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
- if (charge)
+ /* pagein of a big page is an event. So, ignore page size */
+ if (nr_pages > 0)
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
else
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
- __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+ __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
preempt_enable();
}
@@ -815,7 +814,8 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
* removed from global LRU.
*/
mz = page_cgroup_zoneinfo(pc);
- MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+ /* huge page split is done under lru_lock, so we have no races. */
+ MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
if (mem_cgroup_is_root(pc->mem_cgroup))
return;
VM_BUG_ON(list_empty(&pc->lru));
@@ -836,13 +836,12 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
return;
pc = lookup_page_cgroup(page);
- /*
- * Used bit is set without atomic ops but after smp_wmb().
- * For making pc->mem_cgroup visible, insert smp_rmb() here.
- */
- smp_rmb();
/* unused or root page is not rotated. */
- if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+ if (!PageCgroupUsed(pc))
+ return;
+ /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+ smp_rmb();
+ if (mem_cgroup_is_root(pc->mem_cgroup))
return;
mz = page_cgroup_zoneinfo(pc);
list_move(&pc->lru, &mz->lists[lru]);
@@ -857,16 +856,13 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
return;
pc = lookup_page_cgroup(page);
VM_BUG_ON(PageCgroupAcctLRU(pc));
- /*
- * Used bit is set without atomic ops but after smp_wmb().
- * For making pc->mem_cgroup visible, insert smp_rmb() here.
- */
- smp_rmb();
if (!PageCgroupUsed(pc))
return;
-
+ /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+ smp_rmb();
mz = page_cgroup_zoneinfo(pc);
- MEM_CGROUP_ZSTAT(mz, lru) += 1;
+ /* huge page split is done under lru_lock, so we have no races. */
+ MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
SetPageCgroupAcctLRU(pc);
if (mem_cgroup_is_root(pc->mem_cgroup))
return;
@@ -1030,14 +1026,10 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
return NULL;
pc = lookup_page_cgroup(page);
- /*
- * Used bit is set without atomic ops but after smp_wmb().
- * For making pc->mem_cgroup visible, insert smp_rmb() here.
- */
- smp_rmb();
if (!PageCgroupUsed(pc))
return NULL;
-
+ /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+ smp_rmb();
mz = page_cgroup_zoneinfo(pc);
if (!mz)
return NULL;
@@ -1615,7 +1607,7 @@ void mem_cgroup_update_page_stat(struct page *page,
if (unlikely(!mem || !PageCgroupUsed(pc)))
goto out;
/* pc->mem_cgroup is unstable ? */
- if (unlikely(mem_cgroup_stealed(mem))) {
+ if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
/* take a lock against to access pc->mem_cgroup */
move_lock_page_cgroup(pc, &flags);
need_unlock = true;
@@ -2084,14 +2076,27 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
return mem;
}
-/*
- * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
- * USED state. If already USED, uncharge and return.
- */
-static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
- struct page_cgroup *pc,
- enum charge_type ctype)
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page_cgroup *pc,
+ enum charge_type ctype,
+ int page_size)
{
+ int nr_pages = page_size >> PAGE_SHIFT;
+
+ /* try_charge() can return NULL to *memcg, taking care of it. */
+ if (!mem)
+ return;
+
+ lock_page_cgroup(pc);
+ if (unlikely(PageCgroupUsed(pc))) {
+ unlock_page_cgroup(pc);
+ mem_cgroup_cancel_charge(mem, page_size);
+ return;
+ }
+ /*
+ * we don't need page_cgroup_lock for tail pages, because they are not
+ * accessed by any other context at this point.
+ */
pc->mem_cgroup = mem;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2115,35 +2120,7 @@ static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
break;
}
- mem_cgroup_charge_statistics(mem, pc, true);
-}
-
-static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
- struct page_cgroup *pc,
- enum charge_type ctype,
- int page_size)
-{
- int i;
- int count = page_size >> PAGE_SHIFT;
-
- /* try_charge() can return NULL to *memcg, taking care of it. */
- if (!mem)
- return;
-
- lock_page_cgroup(pc);
- if (unlikely(PageCgroupUsed(pc))) {
- unlock_page_cgroup(pc);
- mem_cgroup_cancel_charge(mem, page_size);
- return;
- }
-
- /*
- * we don't need page_cgroup_lock about tail pages, becase they are not
- * accessed by any other context at this point.
- */
- for (i = 0; i < count; i++)
- ____mem_cgroup_commit_charge(mem, pc + i, ctype);
-
+ mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
/*
* "charge_statistics" updated event counter. Then, check it.
@@ -2153,6 +2130,46 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
memcg_check_events(mem, pc->page);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
+ (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
+/*
+ * Because tail pages are not marked as "used", set it. We're under
+ * zone->lru_lock, 'splitting on pmd' and compound_lock.
+ */
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
+{
+ struct page_cgroup *head_pc = lookup_page_cgroup(head);
+ struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
+ unsigned long flags;
+
+ /*
+ * We have no races with charge/uncharge but will have races with
+ * page state accounting.
+ */
+ move_lock_page_cgroup(head_pc, &flags);
+
+ tail_pc->mem_cgroup = head_pc->mem_cgroup;
+ smp_wmb(); /* see __commit_charge() */
+ if (PageCgroupAcctLRU(head_pc)) {
+ enum lru_list lru;
+ struct mem_cgroup_per_zone *mz;
+
+ /*
+ * LRU flags cannot be copied because we need to add the tail
+ * page to LRU by a generic call and our hook will be called.
+ * We hold lru_lock, then, reduce counter directly.
+ */
+ lru = page_lru(head);
+ mz = page_cgroup_zoneinfo(head_pc);
+ MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+ }
+ tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
+ move_unlock_page_cgroup(head_pc, &flags);
+}
+#endif
+
/**
* __mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
@@ -2171,8 +2188,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
*/
static void __mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+ struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
+ int charge_size)
{
+ int nr_pages = charge_size >> PAGE_SHIFT;
+
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
VM_BUG_ON(!page_is_cgroup_locked(pc));
@@ -2186,14 +2206,14 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
preempt_enable();
}
- mem_cgroup_charge_statistics(from, pc, false);
+ mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
if (uncharge)
/* This is not "cancel", but cancel_charge does all we need. */
- mem_cgroup_cancel_charge(from, PAGE_SIZE);
+ mem_cgroup_cancel_charge(from, charge_size);
/* caller should have done css_get */
pc->mem_cgroup = to;
- mem_cgroup_charge_statistics(to, pc, true);
+ mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
/*
* We charges against "to" which may not have any tasks. Then, "to"
* can be under rmdir(). But in current implementation, caller of
@@ -2208,15 +2228,19 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
* __mem_cgroup_move_account()
*/
static int mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+ struct mem_cgroup *from, struct mem_cgroup *to,
+ bool uncharge, int charge_size)
{
int ret = -EINVAL;
unsigned long flags;
+ if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
+ return -EBUSY;
+
lock_page_cgroup(pc);
if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
move_lock_page_cgroup(pc, &flags);
- __mem_cgroup_move_account(pc, from, to, uncharge);
+ __mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
move_unlock_page_cgroup(pc, &flags);
ret = 0;
}
@@ -2241,6 +2265,8 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
struct cgroup *cg = child->css.cgroup;
struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
+ int charge = PAGE_SIZE;
+ unsigned long flags;
int ret;
/* Is ROOT ? */
@@ -2252,17 +2278,23 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
goto out;
if (isolate_lru_page(page))
goto put;
+ /* The page is isolated from LRU and we have no race with splitting */
+ charge = PAGE_SIZE << compound_order(page);
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
- PAGE_SIZE);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, charge);
if (ret || !parent)
goto put_back;
- ret = mem_cgroup_move_account(pc, child, parent, true);
+ if (charge > PAGE_SIZE)
+ flags = compound_lock_irqsave(page);
+
+ ret = mem_cgroup_move_account(pc, child, parent, true, charge);
if (ret)
- mem_cgroup_cancel_charge(parent, PAGE_SIZE);
+ mem_cgroup_cancel_charge(parent, charge);
put_back:
+ if (charge > PAGE_SIZE)
+ compound_unlock_irqrestore(page, flags);
putback_lru_page(page);
put:
put_page(page);
@@ -2546,7 +2578,6 @@ direct_uncharge:
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
- int i;
int count;
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
@@ -2596,8 +2627,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
break;
}
- for (i = 0; i < count; i++)
- mem_cgroup_charge_statistics(mem, pc + i, false);
+ mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
ClearPageCgroupUsed(pc);
/*
@@ -4844,7 +4874,7 @@ retry:
goto put;
pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(pc,
- mc.from, mc.to, false)) {
+ mc.from, mc.to, false, PAGE_SIZE)) {
mc.precharge--;
/* we uncharge from mc.from later. */
mc.moved_charge++;
diff --git a/mm/truncate.c b/mm/truncate.c
index 3c2d5ddfa0d4..49feb46e77b8 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -549,13 +549,12 @@ EXPORT_SYMBOL(truncate_pagecache);
* @inode: inode
* @newsize: new file size
*
- * truncate_setsize updastes i_size update and performs pagecache
- * truncation (if necessary) for a file size updates. It will be
- * typically be called from the filesystem's setattr function when
- * ATTR_SIZE is passed in.
+ * truncate_setsize updates i_size and performs pagecache truncation (if
+ * necessary) to @newsize. It will typically be called from the filesystem's
+ * setattr function when ATTR_SIZE is passed in.
*
- * Must be called with inode_mutex held and after all filesystem
- * specific block truncation has been performed.
+ * Must be called with inode_mutex held and before all filesystem specific
+ * block truncation has been performed.
*/
void truncate_setsize(struct inode *inode, loff_t newsize)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 47a50962ce81..f5d90dedebba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,7 +41,6 @@
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
-#include <linux/compaction.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6e64f7c6a2e9..7850412f52b7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
- unsigned long old_features = vlandev->features;
+ u32 old_features = vlandev->features;
vlandev->features &= ~dev->vlan_features;
vlandev->features |= dev->features & dev->vlan_features;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 17c5ba7551a5..29a54ccd213d 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -59,7 +59,6 @@
* safely advertise a maxsize
* of 64k */
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
/**
* struct p9_trans_rdma - RDMA transport instance
*
diff --git a/net/Kconfig b/net/Kconfig
index 72840626284b..79cabf1ee68b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -221,6 +221,12 @@ config RPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config RFS_ACCEL
+ boolean
+ depends on RPS && GENERIC_HARDIRQS
+ select CPU_RMAP
+ default y
+
config XPS
boolean
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index d936aeccd194..2de93d00631b 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 3850a3ecf947..1997725a243b 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 71a91b3da913..6ce305b40017 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0ae81d07f102..0e9d43509935 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -52,7 +52,6 @@ static void emit_log_char(struct debug_log *debug_log, char c)
static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
{
- int printed_len;
va_list args;
static char debug_log_buf[256];
char *p;
@@ -62,8 +61,7 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
spin_lock_bh(&debug_log->lock);
va_start(args, fmt);
- printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
- fmt, args);
+ vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
va_end(args);
for (p = debug_log_buf; *p != 0; p++)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index 72df532b7d5f..bc9cda3f01e1 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd7bb51825f1..f7b93a0805fe 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 7f186c007b4f..02f1fa7aadfa 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index bbcd8f744cdd..ad2ca925b3e0 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index ac54017601b1..769c246d1fc1 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0065ffb8d96d..429a013d2e0a 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 4585e6549844..2aa439124ee3 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index b962982f017e..50d3a59a3d73 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 5e728d0b7959..55e527a489fe 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4f95777ce080..f2131f45aa9b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -34,6 +34,12 @@
/* protect update critical side of if_list - but not the content */
static DEFINE_SPINLOCK(if_list_lock);
+
+static int batman_skb_recv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+
static void hardif_free_rcu(struct rcu_head *rcu)
{
struct batman_if *batman_if;
@@ -549,8 +555,9 @@ out:
/* receive a packet with the batman ethertype coming on a hard
* interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
{
struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
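
batman_skb_recv() can be made static because its only user is the packet_type hook registered from this same file. A rough sketch of how such a handler is typically wired up follows; the ETH_P_BATMAN value, the helper name and the registration point are assumptions for illustration, not taken from this patch.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define ETH_P_BATMAN 0x4305	/* assumed batman-adv ethertype */

static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	/* placeholder body for the sketch */
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* dev_add_pack() makes the core hand every frame with this ethertype
 * to batman_skb_recv(); no other file needs the symbol */
static struct packet_type batman_ptype __read_mostly = {
	.type = __constant_htons(ETH_P_BATMAN),
	.func = batman_skb_recv,
};

static void batman_register_rx(struct net_device *net_dev)
{
	batman_ptype.dev = net_dev;	/* only frames from this device */
	dev_add_pack(&batman_ptype);
}
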
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 30ec3b8db459..ad195438428a 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -35,10 +35,6 @@ struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
void hardif_disable_interface(struct batman_if *batman_if);
void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 26e623eb9def..fa2693973ab8 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 09216ade16f1..eae24402fd0a 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -49,11 +49,6 @@ struct hashtable_t {
/* allocates and clears the hash */
struct hashtable_t *hash_new(int size);
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove(). But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ecf6d7ffab2e..5e86d6f0c0fb 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index bf9b348cde27..08b185959501 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b827f6a158cb..dc9248d9ea5f 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d4d9926c2201..e235d7bbe045 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,9 +22,6 @@
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-/* Kernel Programming */
-#define LINUX
-
#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
"Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
-#define PACKBUFF_SIZE 2000
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
#define VIS_INTERVAL 5000 /* 5 seconds */
@@ -96,15 +92,11 @@
#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
#define DBG_ALL 3
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-
/*
* Vis
*/
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
/*
* Kernel headers
*/
@@ -151,20 +143,13 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
} \
while (0)
#else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
- struct bat_priv *bat_priv __attribute__((unused)),
- char *fmt __attribute__((unused)), ...)
+static inline void bat_dbg(char type __always_unused,
+ struct bat_priv *bat_priv __always_unused,
+ char *fmt __always_unused, ...)
{
}
#endif
-#define bat_warning(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_warning("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
#define bat_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6b7fb6b7e6f9..54863c9385de 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -247,7 +247,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
orig_node->hna_buff_len);
/* update bonding candidates, we could have lost
* some candidates. */
- update_bonding_candidates(bat_priv, orig_node);
+ update_bonding_candidates(orig_node);
}
}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d474ceb2a4eb..8019fbddffd0 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b49fdf70a6d5..e7571879af3f 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -50,6 +50,7 @@
/* fragmentation defines */
#define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
struct batman_packet {
uint8_t packet_type;
@@ -63,7 +64,7 @@ struct batman_packet {
uint8_t num_hna;
uint8_t gw_flags; /* flags related to gateway class */
uint8_t align;
-} __attribute__((packed));
+} __packed;
#define BAT_PACKET_LEN sizeof(struct batman_packet)
@@ -76,7 +77,7 @@ struct icmp_packet {
uint8_t orig[6];
uint16_t seqno;
uint8_t uid;
-} __attribute__((packed));
+} __packed;
#define BAT_RR_LEN 16
@@ -93,14 +94,14 @@ struct icmp_packet_rr {
uint8_t uid;
uint8_t rr_cur;
uint8_t rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
+} __packed;
struct unicast_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t dest[6];
uint8_t ttl;
-} __attribute__((packed));
+} __packed;
struct unicast_frag_packet {
uint8_t packet_type;
@@ -110,7 +111,7 @@ struct unicast_frag_packet {
uint8_t flags;
uint8_t orig[6];
uint16_t seqno;
-} __attribute__((packed));
+} __packed;
struct bcast_packet {
uint8_t packet_type;
@@ -118,7 +119,7 @@ struct bcast_packet {
uint8_t orig[6];
uint8_t ttl;
uint32_t seqno;
-} __attribute__((packed));
+} __packed;
struct vis_packet {
uint8_t packet_type;
@@ -131,6 +132,6 @@ struct vis_packet {
* neighbors */
uint8_t target_orig[6]; /* who should receive this packet */
uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
+} __packed;
#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index defd37c9be1f..5bb6a619afee 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 6b0cb9aaeba5..0395b2741864 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8828eddd3f72..028f73967b00 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -433,8 +433,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
}
/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
+static void mark_bonding_address(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet)
@@ -447,8 +446,7 @@ static void mark_bonding_address(struct bat_priv *bat_priv,
}
/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+void update_bonding_candidates(struct orig_node *orig_node)
{
int candidates;
int interference_candidate;
@@ -730,9 +728,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
update_orig(bat_priv, orig_node, ethhdr, batman_packet,
if_incoming, hna_buff, hna_buff_len, is_duplicate);
- mark_bonding_address(bat_priv, orig_node,
- orig_neigh_node, batman_packet);
- update_bonding_candidates(bat_priv, orig_node);
+ mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
+ update_bonding_candidates(orig_node);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -810,13 +807,11 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
{
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -848,7 +843,6 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -866,17 +860,15 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
}
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+ struct sk_buff *skb)
{
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -909,7 +901,6 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -978,7 +969,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
+ return recv_icmp_ttl_exceeded(bat_priv, skb);
ret = NET_RX_DROP;
@@ -1001,7 +992,6 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
icmp_packet->ttl--;
@@ -1193,7 +1183,7 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
dstaddr);
if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
- 2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+ frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index f108f230bfdb..ceeca6f6ad16 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -42,7 +42,6 @@ int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node);
+void update_bonding_candidates(struct orig_node *orig_node);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b89b9f7709ae..7cc620e8aa1e 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -49,7 +49,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
}
/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
@@ -356,7 +356,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
else
batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time(bat_priv);
+ send_time = forward_send_time();
add_bat_packet_to_list(bat_priv,
(unsigned char *)batman_packet,
sizeof(struct batman_packet) + hna_buff_len,
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c4cefa8e4f85..bc53adede58d 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e89ede192ed0..145e0f782923 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02b77334d10d..e7b0e1a34a55 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a633b5a435e2..f6917dde42ce 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 10c4c5c319b6..a4f3a37fd6ed 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 97cb23dd3e69..7270405046e9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -246,13 +246,13 @@ struct vis_info {
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
/* vis_info may follow here*/
-} __attribute__((packed));
+} __packed;
struct vis_info_entry {
uint8_t src[ETH_ALEN];
uint8_t dest[ETH_ALEN];
uint8_t quality; /* quality = 0 means HNA */
-} __attribute__((packed));
+} __packed;
struct recvlist_node {
struct list_head list;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc2e28bed844..cbf022cb3121 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -224,16 +224,20 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(struct unicast_packet);
int ucf_hdr_len = sizeof(struct unicast_frag_packet);
- int data_len = skb->len;
+ int data_len = skb->len - uc_hdr_len;
+ int large_tail = 0;
if (!bat_priv->primary_if)
goto dropped;
- unicast_packet = (struct unicast_packet *) skb->data;
+ frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
+ if (!frag_skb)
+ goto dropped;
+ skb_reserve(frag_skb, ucf_hdr_len);
+ unicast_packet = (struct unicast_packet *) skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
- frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
- skb_split(skb, frag_skb, data_len / 2);
+ skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -251,8 +255,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
- frag1->flags |= UNI_FRAG_HEAD;
- frag2->flags &= ~UNI_FRAG_HEAD;
+ if (data_len & 1)
+ large_tail = UNI_FRAG_LARGETAIL;
+
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
frag1->seqno = htons((uint16_t)atomic_inc_return(
&batman_if->frag_seqno));
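
The split point is now computed from the payload alone: data_len excludes the unicast header, the head keeps data_len / 2 payload bytes, the tail gets the remainder, and when data_len is odd the tail is one byte larger, which both fragments advertise via UNI_FRAG_LARGETAIL. A standalone sketch of that arithmetic; the struct layouts mirror packet.h above and the packet length is just an example.

#include <stdint.h>
#include <stdio.h>

/* mirrored from net/batman-adv/packet.h as changed in this patch */
struct unicast_packet {
	uint8_t packet_type, version, dest[6], ttl;
} __attribute__((packed));

struct unicast_frag_packet {
	uint8_t packet_type, version, dest[6], ttl, flags, orig[6];
	uint16_t seqno;
} __attribute__((packed));

#define UNI_FRAG_HEAD      0x01
#define UNI_FRAG_LARGETAIL 0x02

int main(void)
{
	int skb_len = 1410;                         /* example unicast packet length */
	int uc_hdr = sizeof(struct unicast_packet);
	int ucf_hdr = sizeof(struct unicast_frag_packet);
	int data_len = skb_len - uc_hdr;            /* payload only, as in frag_send_skb() */
	int head_payload = data_len / 2;            /* stays in the original skb */
	int tail_payload = data_len - head_payload; /* moved into frag_skb */
	int large_tail = (data_len & 1) ? UNI_FRAG_LARGETAIL : 0;

	printf("head frag: %d bytes on wire (flags 0x%02x)\n",
	       ucf_hdr + head_payload, UNI_FRAG_HEAD | large_tail);
	printf("tail frag: %d bytes on wire (flags 0x%02x)\n",
	       ucf_hdr + tail_payload, large_tail);
	return 0;
}
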
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index e32b7867a9a4..8897308281d4 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -22,6 +22,8 @@
#ifndef _NET_BATMAN_ADV_UNICAST_H_
#define _NET_BATMAN_ADV_UNICAST_H_
+#include "packet.h"
+
#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
@@ -32,4 +34,25 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct batman_if *batman_if, uint8_t dstaddr[]);
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+ struct unicast_frag_packet *unicast_packet;
+ int uneven_correction = 0;
+ unsigned int merged_size;
+
+ unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+ if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+ if (unicast_packet->flags & UNI_FRAG_HEAD)
+ uneven_correction = 1;
+ else
+ uneven_correction = -1;
+ }
+
+ merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+ merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+ return merged_size <= mtu;
+}
+
#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
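
frag_can_reassemble() inverts that arithmetic: doubling one fragment's payload over- or under-counts by one byte when the split was uneven, and the UNI_FRAG_LARGETAIL plus UNI_FRAG_HEAD combination says which way to correct. A small standalone check that head and tail fragments of the same packet predict the same merged size; the header sizes follow from the structs in packet.h and the lengths are examples.

#include <stdio.h>

/* sizeof(struct unicast_packet) / sizeof(struct unicast_frag_packet)
 * with the fields shown in packet.h above */
#define UC_HDR   9
#define UCF_HDR 18

static int merged_size(int frag_skb_len, int is_head, int is_large_tail)
{
	int corr = 0;

	if (is_large_tail)              /* uneven split: one half is a byte bigger */
		corr = is_head ? 1 : -1;

	return (frag_skb_len - UCF_HDR) * 2 + UC_HDR + corr;
}

int main(void)
{
	int data_len = 1391;            /* odd payload forces a large tail */
	int head = UCF_HDR + data_len / 2;
	int tail = UCF_HDR + (data_len - data_len / 2);

	/* both fragments must agree on the size of the reassembled packet */
	printf("from head: %d, from tail: %d, original: %d\n",
	       merged_size(head, 1, 1), merged_size(tail, 0, 1),
	       UC_HDR + data_len);
	return 0;
}
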
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cd4c4231fa48..7db9ad82cc00 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -64,6 +64,7 @@ static void free_info(struct kref *ref)
spin_unlock_bh(&bat_priv->vis_list_lock);
kfree_skb(info->skb_packet);
+ kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm */
@@ -268,10 +269,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buff_pos += sprintf(buff + buff_pos, "%pM,",
entry->addr);
- for (i = 0; i < packet->entries; i++)
+ for (j = 0; j < packet->entries; j++)
buff_pos += vis_data_read_entry(
buff + buff_pos,
- &entries[i],
+ &entries[j],
entry->addr,
entry->primary);
@@ -444,7 +445,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
info);
if (hash_added < 0) {
/* did not work (for some reason) */
- kref_put(&old_info->refcount, free_info);
+ kref_put(&info->refcount, free_info);
info = NULL;
}
@@ -815,7 +816,7 @@ static void send_vis_packets(struct work_struct *work)
container_of(work, struct delayed_work, work);
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, vis_work);
- struct vis_info *info, *temp;
+ struct vis_info *info;
spin_lock_bh(&bat_priv->vis_hash_lock);
purge_vis_packets(bat_priv);
@@ -825,8 +826,9 @@ static void send_vis_packets(struct work_struct *work)
send_list_add(bat_priv, bat_priv->my_vis_info);
}
- list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
- send_list) {
+ while (!list_empty(&bat_priv->vis_send_list)) {
+ info = list_first_entry(&bat_priv->vis_send_list,
+ typeof(*info), send_list);
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);
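
Switching from list_for_each_entry_safe() to repeatedly taking the first entry appears to be motivated by the lock being dropped inside the loop: once vis_hash_lock is released, the iterator's cached next pointer can go stale if another context removes entries, whereas re-reading the list head under the lock is always safe. A minimal kernel-style sketch of the idiom; the names are placeholders, not the batman-adv ones.

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head node;
	/* payload ... */
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

static void process_one(struct work_item *item);	/* may sleep or take other locks */

static void drain_pending(void)
{
	struct work_item *item;

	spin_lock_bh(&pending_lock);
	while (!list_empty(&pending)) {
		/* always re-read the head: entries may have been removed
		 * while the lock was dropped below */
		item = list_first_entry(&pending, struct work_item, node);
		list_del_init(&item->node);

		spin_unlock_bh(&pending_lock);
		process_one(item);
		spin_lock_bh(&pending_lock);
	}
	spin_unlock_bh(&pending_lock);
}
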
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 2c3b33089a9b..31b820d07f23 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index d9d1e2bac1d6..2a6801d8b728 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -365,7 +365,7 @@ int br_min_mtu(const struct net_bridge *br)
void br_features_recompute(struct net_bridge *br)
{
struct net_bridge_port *p;
- unsigned long features, mask;
+ u32 features, mask;
features = mask = br->feature_mask;
if (list_empty(&br->port_list))
@@ -379,7 +379,7 @@ void br_features_recompute(struct net_bridge *br)
}
done:
- br->dev->features = netdev_fix_features(features, NULL);
+ br->dev->features = netdev_fix_features(br->dev, features);
}
/* called with RTNL */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac7734bfc..9f22898c5359 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -182,7 +182,7 @@ struct net_bridge
struct br_cpu_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
- unsigned long feature_mask;
+ u32 feature_mask;
#ifdef CONFIG_BRIDGE_NETFILTER
struct rtable fake_rtable;
bool nf_call_iptables;
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 50a46afc2bcc..2ed0056a39a8 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -22,9 +22,15 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
-struct tcpudphdr {
- __be16 src;
- __be16 dst;
+union pkthdr {
+ struct {
+ __be16 src;
+ __be16 dst;
+ } tcpudphdr;
+ struct {
+ u8 type;
+ u8 code;
+ } icmphdr;
};
static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
- const struct tcpudphdr *pptr;
- struct tcpudphdr _ports;
+ const union pkthdr *pptr;
+ union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
- if (!(info->bitmask & EBT_IP6_DPORT) &&
- !(info->bitmask & EBT_IP6_SPORT))
+ if (!(info->bitmask & ( EBT_IP6_DPORT |
+ EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
- pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
- &_ports);
+
+ /* min icmpv6 header size is 4, so sizeof(_pkthdr) is ok. */
+ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+ &_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
- u32 dst = ntohs(pptr->dst);
+ u16 dst = ntohs(pptr->tcpudphdr.dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1], EBT_IP6_DPORT))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
- u32 src = ntohs(pptr->src);
+ u16 src = ntohs(pptr->tcpudphdr.src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1], EBT_IP6_SPORT))
return false;
}
- return true;
+ if ((info->bitmask & EBT_IP6_ICMP6) &&
+ FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+ pptr->icmphdr.type > info->icmpv6_type[1] ||
+ pptr->icmphdr.code < info->icmpv6_code[0] ||
+ pptr->icmphdr.code > info->icmpv6_code[1],
+ EBT_IP6_ICMP6))
+ return false;
}
return true;
}
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
+ if (info->bitmask & EBT_IP6_ICMP6) {
+ if ((info->invflags & EBT_IP6_PROTO) ||
+ info->protocol != IPPROTO_ICMPV6)
+ return -EINVAL;
+ if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+ info->icmpv6_code[0] > info->icmpv6_code[1])
+ return -EINVAL;
+ }
return 0;
}
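
The union lets a single four-byte skb_header_pointer() fetch serve both cases: for TCP/UDP the two 16-bit port fields, for ICMPv6 the 8-bit type and code, each then matched against inclusive ranges with optional inversion. A standalone sketch of that matching logic; the FWINV-style invert handling is simplified here and the sample values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union pkthdr {                   /* same shape as in ebt_ip6.c */
	struct { uint16_t src, dst; } tcpudphdr;   /* network order in the kernel */
	struct { uint8_t type, code; } icmphdr;
};

/* range check with optional inversion, analogous to FWINV(expr, bit) */
static bool in_range(unsigned int v, unsigned int lo, unsigned int hi, bool invert)
{
	bool outside = v < lo || v > hi;

	return invert ? outside : !outside;
}

int main(void)
{
	union pkthdr hdr;

	hdr.icmphdr.type = 128;      /* echo request */
	hdr.icmphdr.code = 0;

	/* e.g. a rule matching ICMPv6 types 128-129, any code, not inverted */
	bool match = in_range(hdr.icmphdr.type, 128, 129, false) &&
		     in_range(hdr.icmphdr.code, 0, 255, false);

	printf("icmpv6 %u/%u %s\n", hdr.icmphdr.type, hdr.icmphdr.code,
	       match ? "matches" : "does not match");
	return 0;
}
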
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 16df0532d4b9..5f1825df9dca 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1764,6 +1764,7 @@ static int compat_table_info(const struct ebt_table_info *info,
newinfo->entries_size = size;
+ xt_compat_init_offsets(AF_INET, info->nentries);
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede141018a..f1f98d967d8a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -23,10 +23,8 @@
#include <asm/atomic.h>
#define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
@@ -191,6 +189,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
struct cflayer *servl = NULL;
struct cfcnfg_phyinfo *phyinfo = NULL;
u8 phyid = 0;
+
caif_assert(adap_layer != NULL);
channel_id = adap_layer->id;
if (adap_layer->dn == NULL || channel_id == 0) {
@@ -199,16 +198,16 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
goto end;
}
servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
- if (servl == NULL)
- goto end;
- layer_set_up(servl, NULL);
- ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
if (servl == NULL) {
pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
channel_id);
ret = -EINVAL;
goto end;
}
+ layer_set_up(servl, NULL);
+ ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+ if (ret)
+ goto end;
caif_assert(channel_id == servl->id);
if (adap_layer->dn != NULL) {
phyid = cfsrvl_getphyid(adap_layer->dn);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index d3ed264ad6c4..27dab26ad3b8 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -18,7 +18,6 @@
#define DGM_CMD_BIT 0x80
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON 0x80
-#define DGM_CTRL_PKT_SIZE 1
#define DGM_MTU 1500
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 9297f7dea9d8..8303fe3ebf89 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -25,7 +25,6 @@ struct cfserl {
spinlock_t sync;
bool usestx;
};
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index efad410e4c82..315c0d601368 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -20,7 +20,7 @@
#define UTIL_REMOTE_SHUTDOWN 0x82
#define UTIL_FLOW_OFF 0x81
#define UTIL_FLOW_ON 0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3b425b189a99..c3b1dec4acf6 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -17,7 +17,7 @@
#define VEI_FLOW_OFF 0x81
#define VEI_FLOW_ON 0x80
#define VEI_SET_PIN 0x82
-#define VEI_CTRL_PKT_SIZE 1
+
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9d5e8accfab1..092dc88a7c64 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1256,6 +1256,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sockaddr_can *addr =
(struct sockaddr_can *)msg->msg_name;
+ if (msg->msg_namelen < sizeof(*addr))
+ return -EINVAL;
+
if (addr->can_family != AF_CAN)
return -EINVAL;
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610fdb7b..883e9d74fddf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -649,6 +649,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sockaddr_can *addr =
(struct sockaddr_can *)msg->msg_name;
+ if (msg->msg_namelen < sizeof(*addr))
+ return -EINVAL;
+
if (addr->can_family != AF_CAN)
return -EINVAL;
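
Both CAN senders now verify that msg_namelen covers a full struct sockaddr_can before touching any of its fields; previously can_family and later members could be read from a shorter, partly uninitialized buffer. A minimal sketch of the ordering this enforces in a sendmsg handler; the surrounding function is schematic, only the check itself comes from the patch.

#include <linux/can.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>

static int example_sendmsg_addr_check(struct msghdr *msg)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)msg->msg_name;

	if (!msg->msg_name)
		return 0;			/* no address supplied */

	/* length check must come before any field access */
	if (msg->msg_namelen < sizeof(*addr))
		return -EINVAL;

	if (addr->can_family != AF_CAN)
		return -EINVAL;

	return 0;
}
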
diff --git a/net/core/dev.c b/net/core/dev.c
index 54277df0f735..9109e2648d4d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,7 @@
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
#include "net-sysfs.h"
@@ -749,7 +750,8 @@ EXPORT_SYMBOL(dev_get_by_index);
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
- * is not found or a pointer to the device. The caller must hold RCU
+ * is not found or a pointer to the device.
+ * The caller must hold RCU or RTNL.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
@@ -1285,7 +1287,7 @@ static int __dev_close(struct net_device *dev)
return __dev_close_many(&single);
}
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(tmp_list);
@@ -1593,6 +1595,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this verify the tc mapping remains valid and if
+ * not, null the mapping. With no priorities mapping to this
+ * offset/count pair it will no longer be used. In the worst case, TC0
+ * is invalid and nothing can be done, so priority mappings are
+ * disabled. It is expected that drivers will fix this mapping if they
+ * can before calling netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+ int i;
+ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+ /* If TC0 is invalidated disable TC mapping */
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues changed "
+ "invalidating tc mappings. Priority "
+ "traffic classification disabled!\n");
+ dev->num_tc = 0;
+ return;
+ }
+
+ /* Invalidated prio to tc mappings set to TC0 */
+ for (i = 1; i < TC_BITMASK + 1; i++) {
+ int q = netdev_get_prio_tc_map(dev, i);
+
+ tc = &dev->tc_to_txq[q];
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues "
+ "changed. Priority %i to tc "
+ "mapping %i is no longer valid "
+ "setting map to 0\n",
+ i, q);
+ netdev_set_prio_tc_map(dev, i, 0);
+ }
+ }
+}
+
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -1612,6 +1656,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (rc)
return rc;
+ if (dev->num_tc)
+ netif_setup_tc(dev, txq);
+
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
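
netif_setup_tc() keeps the per-priority traffic-class table consistent when the number of real TX queues shrinks: if TC0's offset/count no longer fits, classification is disabled outright; otherwise any priority mapped to a now-invalid class falls back to TC0. A standalone model of that validation; the table sizes and values are illustrative.

#include <stdio.h>

#define TC_BITMASK 15          /* 16 priorities, as in the kernel */

struct tc_txq { unsigned int offset, count; };

static struct tc_txq tc_to_txq[16] = { {0, 4}, {4, 4}, {8, 8} };
static unsigned char prio_tc_map[TC_BITMASK + 1] = { 0, 0, 1, 1, 2, 2 };
static int num_tc = 3;

static void setup_tc(unsigned int txq)
{
	int i;

	if (tc_to_txq[0].offset + tc_to_txq[0].count > txq) {
		printf("TC0 invalid, disabling priority classification\n");
		num_tc = 0;
		return;
	}

	for (i = 1; i <= TC_BITMASK; i++) {
		struct tc_txq *tc = &tc_to_txq[prio_tc_map[i]];

		if (tc->offset + tc->count > txq) {
			printf("prio %d: tc %d no longer valid, remapping to 0\n",
			       i, prio_tc_map[i]);
			prio_tc_map[i] = 0;
		}
	}
}

int main(void)
{
	setup_tc(8);            /* queues shrank from 16 to 8: TC2 becomes invalid */
	return 0;
}
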
@@ -1811,7 +1858,7 @@ EXPORT_SYMBOL(skb_checksum_help);
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
@@ -1999,9 +2046,9 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
protocol == htons(ETH_P_FCOE)));
}
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
{
- if (!can_checksum_protocol(protocol, features)) {
+ if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
features &= ~NETIF_F_SG;
} else if (illegal_highdma(skb->dev, skb)) {
@@ -2011,10 +2058,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
return features;
}
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- int features = skb->dev->features;
+ u32 features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2023,13 +2070,13 @@ int netif_skb_features(struct sk_buff *skb)
return harmonize_features(skb, protocol, features);
}
- features &= skb->dev->vlan_features;
+ features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
if (protocol != htons(ETH_P_8021Q)) {
return harmonize_features(skb, protocol, features);
} else {
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM;
+ NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
return harmonize_features(skb, protocol, features);
}
}
@@ -2059,7 +2106,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
- int features;
+ u32 features;
/*
* If device doesnt need skb->dst, release it right now while
@@ -2161,6 +2208,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
unsigned int num_tx_queues)
{
u32 hash;
+ u16 qoffset = 0;
+ u16 qcount = num_tx_queues;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
@@ -2169,13 +2218,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
return hash;
}
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ qoffset = dev->tc_to_txq[tc].offset;
+ qcount = dev->tc_to_txq[tc].count;
+ }
+
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
- return (u16) (((u64) hash * num_tx_queues) >> 32);
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
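
With traffic classes in play the hash is no longer scaled over all real TX queues but over the selected class's queue count and then offset into that class's range; the ((u64)hash * qcount) >> 32 form maps a 32-bit hash uniformly onto [0, qcount) without a modulo. A standalone illustration; the queue layout is an example.

#include <stdint.h>
#include <stdio.h>

/* pick a queue for a 32-bit flow hash within a class's [offset, offset + count) */
static uint16_t pick_queue(uint32_t hash, uint16_t qoffset, uint16_t qcount)
{
	return (uint16_t)(((uint64_t)hash * qcount) >> 32) + qoffset;
}

int main(void)
{
	/* e.g. a traffic class owning queues 8..11 */
	uint16_t qoffset = 8, qcount = 4;
	uint32_t hashes[] = { 0x00000000, 0x55555555, 0xcafebabe, 0xffffffff };
	unsigned int i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash 0x%08x -> queue %u\n", hashes[i],
		       pick_queue(hashes[i], qoffset, qcount));
	return 0;
}
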
@@ -2272,15 +2327,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
- bool contended = qdisc_is_running(q);
+ bool contended;
int rc;
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits __QDISC_STATE_RUNNING owner to get the lock more often
* and dequeue packets faster.
*/
+ contended = qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);
@@ -2298,7 +2356,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
- qdisc_skb_cb(skb)->pkt_len = skb->len;
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2313,7 +2370,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
- rc = qdisc_enqueue_root(skb, q);
+ rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -2532,6 +2589,53 @@ EXPORT_SYMBOL(__skb_get_rxhash);
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow *rflow, u16 next_cpu)
+{
+ u16 tcpu;
+
+ tcpu = rflow->cpu = next_cpu;
+ if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+ struct netdev_rx_queue *rxqueue;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *old_rflow;
+ u32 flow_id;
+ u16 rxq_index;
+ int rc;
+
+ /* Should we steer this flow to a different hardware queue? */
+ if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap)
+ goto out;
+ rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+ if (rxq_index == skb_get_rx_queue(skb))
+ goto out;
+
+ rxqueue = dev->_rx + rxq_index;
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (!flow_table)
+ goto out;
+ flow_id = skb->rxhash & flow_table->mask;
+ rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+ rxq_index, flow_id);
+ if (rc < 0)
+ goto out;
+ old_rflow = rflow;
+ rflow = &flow_table->flows[flow_id];
+ rflow->cpu = next_cpu;
+ rflow->filter = rc;
+ if (old_rflow->filter == rflow->filter)
+ old_rflow->filter = RPS_NO_FILTER;
+ out:
+#endif
+ rflow->last_qtail =
+ per_cpu(softnet_data, tcpu).input_queue_head;
+ }
+
+ return rflow;
+}
+
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
@@ -2602,12 +2706,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0)) {
- tcpu = rflow->cpu = next_cpu;
- if (tcpu != RPS_NO_CPU)
- rflow->last_qtail = per_cpu(softnet_data,
- tcpu).input_queue_head;
- }
+ rflow->last_qtail)) >= 0))
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
@@ -2628,6 +2729,46 @@ done:
return cpu;
}
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id)
+{
+ struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *rflow;
+ bool expire = true;
+ int cpu;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (flow_table && flow_id <= flow_table->mask) {
+ rflow = &flow_table->flows[flow_id];
+ cpu = ACCESS_ONCE(rflow->cpu);
+ if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+ rflow->last_qtail) <
+ (int)(10 * flow_table->mask)))
+ expire = false;
+ }
+ rcu_read_unlock();
+ return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
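
Per the kernel-doc above, a driver that implements ndo_rx_flow_steer() is expected to poll its installed filters and tear down the ones rps_may_expire_flow() reports as expired. A hedged sketch of such a periodic scan; the driver's filter bookkeeping and hardware teardown helper are hypothetical, only rps_may_expire_flow() comes from this patch.

#include <linux/netdevice.h>

/* hypothetical per-device filter state kept by a driver that
 * implements ndo_rx_flow_steer() */
struct my_rfs_filter {
	bool in_use;
	u16  rxq_index;     /* queue the flow was steered to */
	u32  flow_id;       /* value passed in by the stack */
};

static void my_hw_remove_filter(struct net_device *dev, int id);	/* hypothetical */

static void my_rfs_expire_scan(struct net_device *dev,
			       struct my_rfs_filter *filters, int n)
{
	int id;

	for (id = 0; id < n; id++) {
		if (!filters[id].in_use)
			continue;
		/* the stack says nothing recent maps to this filter */
		if (rps_may_expire_flow(dev, filters[id].rxq_index,
					filters[id].flow_id, id)) {
			my_hw_remove_filter(dev, id);
			filters[id].in_use = false;
		}
	}
}
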
@@ -3423,6 +3564,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
skb->vlan_tci = 0;
+ skb->dev = napi->dev;
napi->skb = skb;
}
@@ -3910,12 +4052,15 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev = (v == SEQ_START_TOKEN) ?
- first_net_device(seq_file_net(seq)) :
- next_net_device((struct net_device *)v);
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ dev = first_net_device_rcu(seq_file_net(seq));
+ else
+ dev = next_net_device_rcu(dev);
++*pos;
- return rcu_dereference(dev);
+ return dev;
}
void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4572,6 +4717,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
EXPORT_SYMBOL(dev_set_mtu);
/**
+ * dev_set_group - Change group this device belongs to
+ * @dev: device
+ * @new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+ dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
+/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
@@ -5061,41 +5217,49 @@ static void rollback_registered(struct net_device *dev)
rollback_registered_many(&single);
}
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
{
+ /* Fix illegal checksum combinations */
+ if ((features & NETIF_F_HW_CSUM) &&
+ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed HW and IP checksum settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
+
+ if ((features & NETIF_F_NO_CSUM) &&
+ (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed no checksumming and other settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ }
+
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
- "checksum feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_SG since no checksum feature.\n");
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
- "SG feature.\n", name);
+ netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
features &= ~NETIF_F_TSO;
}
+ /* UFO needs SG and checksumming */
if (features & NETIF_F_UFO) {
/* maybe split UFO into V4 and V6? */
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no checksum offload features.\n",
- name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no checksum offload features.\n");
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no NETIF_F_SG feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
features &= ~NETIF_F_UFO;
}
}
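
netdev_fix_features() now takes the device so it can use netdev_info(), and it absorbs the checksum sanity checks that previously lived in register_netdevice(); the result is one place where dependent features cascade off: conflicting checksum flags are trimmed, SG is dropped without any checksum offload, TSO without SG, and UFO without SG plus suitable checksumming. A standalone model of such dependency trimming; the bit values are illustrative, not the kernel's NETIF_F_* values.

#include <stdint.h>
#include <stdio.h>

/* illustrative feature bits, not the kernel's NETIF_F_* values */
#define F_SG        (1u << 0)
#define F_IP_CSUM   (1u << 1)
#define F_HW_CSUM   (1u << 2)
#define F_TSO       (1u << 3)
#define F_UFO       (1u << 4)
#define F_ANY_CSUM  (F_IP_CSUM | F_HW_CSUM)

static uint32_t fix_features(uint32_t f)
{
	if (!(f & F_ANY_CSUM) && (f & F_SG)) {
		printf("dropping SG: no checksum feature\n");
		f &= ~F_SG;
	}
	if ((f & F_TSO) && !(f & F_SG)) {
		printf("dropping TSO: no SG\n");
		f &= ~F_TSO;
	}
	if ((f & F_UFO) && !((f & F_SG) && (f & F_ANY_CSUM))) {
		printf("dropping UFO: needs SG and checksumming\n");
		f &= ~F_UFO;
	}
	return f;
}

int main(void)
{
	/* e.g. a driver advertising SG/TSO/UFO but no checksum offload */
	uint32_t f = fix_features(F_SG | F_TSO | F_UFO);

	printf("resulting features: 0x%x\n", f);
	return 0;
}
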
@@ -5238,22 +5402,7 @@ int register_netdevice(struct net_device *dev)
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
- /* Fix illegal checksum combinations */
- if ((dev->features & NETIF_F_HW_CSUM) &&
- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
- }
-
- if ((dev->features & NETIF_F_NO_CSUM) &&
- (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
- }
-
- dev->features = netdev_fix_features(dev->features, dev->name);
+ dev->features = netdev_fix_features(dev, dev->features);
/* Enable software GSO if SG is supported. */
if (dev->features & NETIF_F_SG)
@@ -5678,6 +5827,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
strcpy(dev->name, name);
+ dev->group = INIT_NETDEV_GROUP;
return dev;
free_pcpu:
@@ -5988,8 +6138,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
{
/* If device needs checksumming, downgrade to it. */
if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
diff --git a/net/core/dst.c b/net/core/dst.c
index b99c7c7ffce2..c1674fde827d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -164,6 +164,8 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
+const u32 dst_default_metrics[RTAX_MAX];
+
void *dst_alloc(struct dst_ops *ops)
{
struct dst_entry *dst;
@@ -180,6 +182,7 @@ void *dst_alloc(struct dst_ops *ops)
dst->lastuse = jiffies;
dst->path = dst;
dst->input = dst->output = dst_discard;
+ dst_init_metrics(dst, dst_default_metrics, true);
#if RT_CACHE_DEBUG >= 2
atomic_inc(&dst_total);
#endif
@@ -282,6 +285,42 @@ void dst_release(struct dst_entry *dst)
}
EXPORT_SYMBOL(dst_release);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+ if (p) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ kfree(p);
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false. */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ unsigned long prev, new;
+
+ new = (unsigned long) dst_default_metrics;
+ prev = cmpxchg(&dst->_metrics, old, new);
+ if (prev == old)
+ kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
/**
* skb_dst_set_noref - sets skb dst, without a reference
* @skb: buffer
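
dst_cow_metrics_generic() is copy-on-write with a lockless install: every new dst points at the shared read-only dst_default_metrics array (the read-only flag lives in the same word as the pointer, as the DST_METRICS_READ_ONLY test on prev shows), and the first writer clones the array and swings the word over with cmpxchg, freeing its copy if it lost the race. A standalone model of the pattern using C11 atomics; NMETRICS stands in for RTAX_MAX and the flag encoding is simplified to the low pointer bit.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NMETRICS   16
#define READ_ONLY  0x1UL                 /* flag kept in the low pointer bit */
#define PTR(word)  ((uint32_t *)((word) & ~READ_ONLY))

static const uint32_t default_metrics[NMETRICS];    /* shared, all zero */
static _Atomic uintptr_t metrics_word;               /* pointer | READ_ONLY */

static uint32_t *cow_metrics(void)
{
	uintptr_t old = atomic_load(&metrics_word);
	uint32_t *p;

	if (!(old & READ_ONLY))          /* already private and writable */
		return PTR(old);

	p = malloc(sizeof(uint32_t) * NMETRICS);
	if (!p)
		return NULL;
	memcpy(p, PTR(old), sizeof(uint32_t) * NMETRICS);

	/* install our copy; if a racer won, drop ours and use theirs */
	if (!atomic_compare_exchange_strong(&metrics_word, &old, (uintptr_t)p)) {
		free(p);
		if (old & READ_ONLY)
			return NULL;     /* lost to another read-only source */
		p = PTR(old);
	}
	return p;
}

int main(void)
{
	atomic_store(&metrics_word, (uintptr_t)default_metrics | READ_ONLY);

	uint32_t *m = cow_metrics();     /* first write clones the defaults */
	if (m)
		m[3] = 1500;
	printf("metrics are now %s\n",
	       (atomic_load(&metrics_word) & READ_ONLY) ? "shared" : "private");
	return 0;
}
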
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 17741782a345..5984ee0c7136 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vmalloc(reglen);
+ regbuf = vzalloc(reglen);
if (!regbuf)
return -ENOMEM;
@@ -1458,7 +1458,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
void __user *useraddr = ifr->ifr_data;
u32 ethcmd;
int rc;
- unsigned long old_features;
+ u32 old_features;
if (!dev || !netif_device_present(dev))
return -ENODEV;
diff --git a/net/core/filter.c b/net/core/filter.c
index afc58374ca96..232b1873bb28 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
if (err)
return err;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return err;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 60a902913429..799f06e03a22 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
size_t size = entries * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
- struct neighbour **buckets;
+ struct neighbour __rcu **buckets;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
@@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
if (size <= PAGE_SIZE)
buckets = kzalloc(size, GFP_ATOMIC);
else
- buckets = (struct neighbour **)
+ buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
if (!buckets) {
kfree(ret);
return NULL;
}
- rcu_assign_pointer(ret->hash_buckets, buckets);
+ ret->hash_buckets = buckets;
ret->hash_mask = entries - 1;
get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
return ret;
@@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct neigh_hash_table,
rcu);
size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
- struct neighbour **buckets = nht->hash_buckets;
+ struct neighbour __rcu **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
kfree(buckets);
@@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
panic("cannot create neighbour proc dir entry");
#endif
- tbl->nht = neigh_hash_alloc(8);
+ RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@ int neigh_table_clear(struct neigh_table *tbl)
}
write_unlock(&neigh_tbl_lock);
- call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+ call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+ neigh_hash_free_rcu);
tbl->nht = NULL;
kfree(tbl->phash_buckets);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e23c01be5a5b..2e4a393dfc3b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
@@ -295,6 +295,20 @@ static ssize_t show_ifalias(struct device *dev,
return ret;
}
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+ dev_set_group(net, (int) new_group);
+ return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_group);
+}
+
static struct device_attribute net_class_attributes[] = {
__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@ static struct device_attribute net_class_attributes[] = {
__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
store_tx_queue_len),
+ __ATTR(group, S_IRUGO | S_IWUSR, show_group, store_group),
{}
};
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..d73b77adb676 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -251,6 +251,7 @@ struct pktgen_dev {
int max_pkt_size; /* = ETH_ZLEN; */
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
+ struct page *page;
u64 delay; /* nano-seconds */
__u64 count; /* Default No packets to send */
@@ -1134,6 +1135,10 @@ static ssize_t pktgen_if_write(struct file *file,
if (node_possible(value)) {
pkt_dev->node = value;
sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+ if (pkt_dev->page) {
+ put_page(pkt_dev->page);
+ pkt_dev->page = NULL;
+ }
}
else
sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,90 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi,
return htons(id | (cfi << 12) | (prio << 13));
}
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+ int datalen)
+{
+ struct timeval timestamp;
+ struct pktgen_hdr *pgh;
+
+ pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+ datalen -= sizeof(*pgh);
+
+ if (pkt_dev->nfrags <= 0) {
+ pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
+ memset(pgh + 1, 0, datalen);
+ } else {
+ int frags = pkt_dev->nfrags;
+ int i, len;
+
+
+ if (frags > MAX_SKB_FRAGS)
+ frags = MAX_SKB_FRAGS;
+ len = datalen - frags * PAGE_SIZE;
+ if (len > 0) {
+ memset(skb_put(skb, len), 0, len);
+ datalen = frags * PAGE_SIZE;
+ }
+
+ i = 0;
+ while (datalen > 0) {
+ if (unlikely(!pkt_dev->page)) {
+ int node = numa_node_id();
+
+ if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+ node = pkt_dev->node;
+ pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pkt_dev->page)
+ break;
+ }
+ skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+ get_page(pkt_dev->page);
+ skb_shinfo(skb)->frags[i].page_offset = 0;
+ skb_shinfo(skb)->frags[i].size =
+ (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+ datalen -= skb_shinfo(skb)->frags[i].size;
+ skb->len += skb_shinfo(skb)->frags[i].size;
+ skb->data_len += skb_shinfo(skb)->frags[i].size;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+
+ while (i < frags) {
+ int rem;
+
+ if (i == 0)
+ break;
+
+ rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+ if (rem == 0)
+ break;
+
+ skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+ skb_shinfo(skb)->frags[i] =
+ skb_shinfo(skb)->frags[i - 1];
+ get_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb)->frags[i].page =
+ skb_shinfo(skb)->frags[i - 1].page;
+ skb_shinfo(skb)->frags[i].page_offset +=
+ skb_shinfo(skb)->frags[i - 1].size;
+ skb_shinfo(skb)->frags[i].size = rem;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+ }
+
+ /* Stamp the time, and sequence number,
+ * convert them to network byte order
+ */
+ pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+ pgh->seq_num = htonl(pkt_dev->seq_num);
+
+ do_gettimeofday(&timestamp);
+ pgh->tv_sec = htonl(timestamp.tv_sec);
+ pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
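
pktgen_finalize_skb() factors the duplicated IPv4/IPv6 tail code into one place and, with the new pkt_dev->page, reuses a single (optionally NUMA-local) page for every fragment instead of allocating a fresh page per frag per packet. The second loop spreads the data over the requested number of frags by repeatedly halving the last one. A standalone model of that size-spreading; the page size and lengths are examples.

#include <stdio.h>

#define PAGE_SZ   4096
#define MAX_FRAGS 17

int main(void)
{
	int frag_size[MAX_FRAGS];
	int datalen = 6000, frags = 6, i = 0;

	/* first pass: fill frags with up to a page each, as in the kernel loop */
	while (datalen > 0 && i < MAX_FRAGS) {
		frag_size[i] = datalen < PAGE_SZ ? datalen : PAGE_SZ;
		datalen -= frag_size[i];
		i++;
	}

	/* second pass: if fewer frags were used than requested, keep halving
	 * the last one so the data is spread over 'frags' entries */
	while (i < frags) {
		int rem;

		if (i == 0)
			break;
		rem = frag_size[i - 1] / 2;
		if (rem == 0)
			break;
		frag_size[i - 1] -= rem;
		frag_size[i] = rem;
		i++;
	}

	for (int j = 0; j < i; j++)
		printf("frag %d: %d bytes\n", j, frag_size[j]);
	return 0;
}
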
@@ -2613,7 +2702,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct udphdr *udph;
int datalen, iplen;
struct iphdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IP);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2817,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
pkt_dev->pkt_overhead);
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
-
- if (pkt_dev->nfrags <= 0) {
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
- } else {
- int frags = pkt_dev->nfrags;
- int i, len;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- len = datalen - frags * PAGE_SIZE;
- memset(skb_put(skb, len), 0, len);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2999,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct udphdr *udph;
int datalen;
struct ipv6hdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3101,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
- int frags = pkt_dev->nfrags;
- int i;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- * should we update cloned packets too ?
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
- /* pkt_dev->seq_num++; FF: you really mean this? */
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
}
@@ -3884,6 +3834,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
free_SAs(pkt_dev);
#endif
vfree(pkt_dev->flows);
+ if (pkt_dev->page)
+ put_page(pkt_dev->page);
kfree(pkt_dev);
return 0;
}
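Editorial aside on the pktgen_finalize_skb() hunk above: the fragment sizing is easy to lose inside the diff, so here is a minimal, hypothetical userspace model of just the size arithmetic (PAGE_SIZE, MAX_SKB_FRAGS and the sample datalen/nfrags values are assumptions, not taken from the patch). Page-sized chunks are filled first, then the last chunk is repeatedly halved until the requested fragment count is reached, mirroring the two while loops in the new helper.

/* Hypothetical model of the fragment sizing in pktgen_finalize_skb();
 * not kernel code, builds with any C compiler. */
#include <stdio.h>

#define PAGE_SIZE     4096
#define MAX_SKB_FRAGS 17

int main(void)
{
	int datalen = 6000;          /* assumed payload size */
	int frags = 8;               /* assumed pkt_dev->nfrags */
	int size[MAX_SKB_FRAGS];
	int i = 0, j;

	if (frags > MAX_SKB_FRAGS)
		frags = MAX_SKB_FRAGS;
	if (datalen > frags * PAGE_SIZE)
		datalen = frags * PAGE_SIZE;   /* excess goes to linear data */

	while (datalen > 0) {                  /* page-sized chunks first */
		size[i] = datalen < PAGE_SIZE ? datalen : PAGE_SIZE;
		datalen -= size[i];
		i++;
	}

	while (i < frags) {                    /* halve the tail chunk */
		int rem;

		if (i == 0)
			break;
		rem = size[i - 1] / 2;
		if (rem == 0)
			break;
		size[i - 1] -= rem;
		size[i] = rem;
		i++;
	}

	for (j = 0; j < i; j++)
		printf("frag %d: %d bytes\n", j, size[j]);
	return 0;
}

In the kernel helper each chunk additionally takes a reference on the single per-device pkt_dev->page via get_page(), which is why the pktgen_remove_device() hunk above now drops that reference with put_page().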
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a5f7535aab5b..da0fe457c858 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -868,6 +868,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+ NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
if (dev->ifindex != dev->iflink)
NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1121,8 +1122,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
- err = af_ops->validate_link_af(dev,
- tb[IFLA_AF_SPEC]);
+ err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
@@ -1265,6 +1265,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
modified = 1;
}
+ if (tb[IFLA_GROUP]) {
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+ modified = 1;
+ }
+
/*
* Interface selected by interface index but interface
* name provided implies that a name change has been
@@ -1542,6 +1547,8 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ if (tb[IFLA_GROUP])
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
return dev;
@@ -1552,6 +1559,24 @@ err:
}
EXPORT_SYMBOL(rtnl_create_link);
+static int rtnl_group_changelink(struct net *net, int group,
+ struct ifinfomsg *ifm,
+ struct nlattr **tb)
+{
+ struct net_device *dev;
+ int err;
+
+ for_each_netdev(net, dev) {
+ if (dev->group == group) {
+ err = do_setlink(dev, ifm, tb, NULL, 0);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
@@ -1579,10 +1604,12 @@ replay:
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
- else if (ifname[0])
- dev = __dev_get_by_name(net, ifname);
- else
- dev = NULL;
+ else {
+ if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ dev = NULL;
+ }
err = validate_linkmsg(dev, tb);
if (err < 0)
@@ -1646,8 +1673,13 @@ replay:
return do_setlink(dev, ifm, tb, ifname, modified);
}
- if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ return rtnl_group_changelink(net,
+ nla_get_u32(tb[IFLA_GROUP]),
+ ifm, tb);
return -ENODEV;
+ }
if (ifm->ifi_index)
return -EOPNOTSUPP;
@@ -1672,6 +1704,9 @@ replay:
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+
dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))
@@ -1820,7 +1855,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
return -EPERM;
- if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d31bb36ae0dc..14cf560b4a3e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ kmemcheck_annotate_variable(shinfo->destructor_arg);
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -2497,7 +2498,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
@@ -2507,7 +2508,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
unsigned int offset = doffset;
unsigned int headroom;
unsigned int len;
- int sg = features & NETIF_F_SG;
+ int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
@@ -2744,8 +2745,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
merge:
if (offset > headlen) {
- skbinfo->frags[0].page_offset += offset - headlen;
- skbinfo->frags[0].size -= offset - headlen;
+ unsigned int eat = offset - headlen;
+
+ skbinfo->frags[0].page_offset += eat;
+ skbinfo->frags[0].size -= eat;
+ skb->data_len -= eat;
+ skb->len -= eat;
offset = headlen;
}
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d900ab99814a..6b03f561caec 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -583,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
u8 up, idtype;
int ret = -EINVAL;
- if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+ if (!tb[DCB_ATTR_APP])
goto out;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
goto out;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
- up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+ if (netdev->dcbnl_ops->getapp) {
+ up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+ } else {
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ up = dcb_getapp(netdev, &app);
+ }
/* send this back */
dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e96d5e810039..fadecd20d75b 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -583,6 +583,15 @@ done:
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+ return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
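As a quick sanity check of the rfc3390_bytes_to_packets() helper introduced above, the hedged userspace snippet below restates the RFC 5681, 3.1 boundaries it encodes (the function body matches the hunk; the test values are illustrative).

#include <assert.h>
#include <stdio.h>

/* Same mapping as the rfc3390_bytes_to_packets() helper added above. */
static unsigned int rfc3390_bytes_to_packets(unsigned int smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}

int main(void)
{
	/* RFC 5681, 3.1: initial window of 4 segments for SMSS <= 1095,
	 * 3 segments for 1095 < SMSS <= 2190, 2 segments above that. */
	assert(rfc3390_bytes_to_packets(536)  == 4);
	assert(rfc3390_bytes_to_packets(1095) == 4);
	assert(rfc3390_bytes_to_packets(1460) == 3);
	assert(rfc3390_bytes_to_packets(2190) == 3);
	assert(rfc3390_bytes_to_packets(4096) == 2);
	printf("RFC 3390 initial-window mapping checks out\n");
	return 0;
}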
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5e636365d33c..42c9c62d3417 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,6 +112,7 @@ static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@ static struct dst_ops dn_dst_ops = {
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
.default_mtu = dn_dst_default_mtu,
+ .cow_metrics = dst_cow_metrics_generic,
+ .destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
};
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+ dst_destroy_metrics_generic(dst);
+}
+
static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
@@ -814,14 +822,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
struct net_device *dev = rt->dst.dev;
+ unsigned int mss_metric;
struct neighbour *n;
- unsigned int metric;
if (fi) {
if (DN_FIB_RES_GW(*res) &&
DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = DN_FIB_RES_GW(*res);
- dst_import_metrics(&rt->dst, fi->fib_metrics);
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
rt->rt_type = res->type;
@@ -834,10 +842,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
- metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
- if (metric) {
+ mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+ if (mss_metric) {
unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
- if (metric > mss)
+ if (mss_metric > mss)
dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
}
return 0;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f2abd3755690..b66600b3f4b5 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -59,7 +59,6 @@ struct dn_hash
};
#define dz_key_0(key) ((key).datum = 0)
-#define dz_prefix(key,dz) ((key).datum)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a74e1f4..3fb14b7c13cf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
}
module_exit(dsa_cleanup_module);
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a586b4..0c2826337919 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock *sk = sock->sk;
struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+ struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
error_free_buf:
vfree(userbuf);
+error:
#else
err = -EPROTOTYPE;
#endif
- error:
mutex_unlock(&econet_mutex);
return err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index a5a1050595d1..cbb505ba9324 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -55,45 +55,9 @@ config IP_ADVANCED_ROUTER
If unsure, say N here.
-choice
- prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
- depends on IP_ADVANCED_ROUTER
- default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
- bool "FIB_HASH"
- ---help---
- Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
- bool "FIB_TRIE"
- ---help---
- Use new experimental LC-trie as FIB lookup algorithm.
- This improves lookup performance if you have a large
- number of routes.
-
- LC-trie is a longest matching prefix lookup algorithm which
- performs better than FIB_HASH for large routing tables.
- But, it consumes more memory and is more complex.
-
- LC-trie is described in:
-
- IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
- IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
- June 1999
-
- An experimental study of compression methods for dynamic tries
- Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
- <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
- def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
config IP_FIB_TRIE_STATS
bool "FIB TRIE statistics"
- depends on IP_FIB_TRIE
+ depends on IP_ADVANCED_ROUTER
---help---
Keep track of statistics on structure of FIB TRIE table.
Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@ config IP_ROUTE_VERBOSE
handled by the klogd daemon which is responsible for kernel messages
("man klogd").
+config IP_ROUTE_CLASSID
+ bool
+
config IP_PNP
bool "IP: kernel level autoconfiguration"
help
@@ -657,4 +624,3 @@ config TCP_MD5SIG
on the Internet.
If unsure, say N.
-
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4978d22f9a75..0dc772d0d125 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,12 +10,10 @@ obj-y := route.o inetpeer.o protocol.o \
tcp_minisocks.o tcp_cong.o \
datagram.o raw.o udp.o udplite.o \
arp.o icmp.o devinet.o af_inet.o igmp.o \
- fib_frontend.o fib_semantics.o \
+ fib_frontend.o fib_semantics.o fib_trie.o \
inet_fragment.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b61107df6c..7ceb80447631 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(inet_ioctl);
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ int err = -ENOIOCTLCMD;
+
+ if (sk->sk_prot->compat_ioctl)
+ err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+ return err;
+}
+#endif
+
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
@@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
@@ -1215,7 +1231,7 @@ out:
return err;
}
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 04c8b69fd426..7927589813b5 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
return 0;
}
- if (__in_dev_get_rcu(dev)) {
- IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+ if (__in_dev_get_rtnl(dev)) {
+ IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
return 0;
}
return -ENXIO;
}
-/* must be called with rcu_read_lock() */
static int arp_req_set_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1233,10 +1232,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
htonl(0xFFFFFFFFUL);
- rcu_read_lock();
+ rtnl_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- dev = dev_get_by_name_rcu(net, r.arp_dev);
+ dev = __dev_get_by_name(net, r.arp_dev);
if (dev == NULL)
goto out;
@@ -1263,7 +1262,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
}
out:
- rcu_read_unlock();
+ rtnl_unlock();
if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
err = -EFAULT;
return err;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1d2cdd43a878..2a49c061b34c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -51,11 +51,11 @@ static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
- local_table = fib_hash_table(RT_TABLE_LOCAL);
+ local_table = fib_trie_table(RT_TABLE_LOCAL);
if (local_table == NULL)
return -ENOMEM;
- main_table = fib_hash_table(RT_TABLE_MAIN);
+ main_table = fib_trie_table(RT_TABLE_MAIN);
if (main_table == NULL)
goto fail;
@@ -82,7 +82,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;
- tb = fib_hash_table(id);
+ tb = fib_trie_table(id);
if (!tb)
return NULL;
h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
-void fib_select_default(struct net *net,
- const struct flowi *flp, struct fib_result *res)
-{
- struct fib_table *tb;
- int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
- return;
- table = res->r->table;
-#endif
- tb = fib_get_table(net, table);
- if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- fib_table_select_default(tb, flp, res);
-}
-
static void fib_flush(struct net *net)
{
int flushed = 0;
@@ -1101,5 +1086,5 @@ void __init ip_fib_init(void)
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- fib_hash_init();
+ fib_trie_init();
}
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644
index b3acb0417b21..000000000000
--- a/net/ipv4/fib_hash.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
- struct hlist_node fn_hash;
- struct list_head fn_alias;
- __be32 fn_key;
- struct fib_alias fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
- struct fn_zone __rcu *fz_next; /* Next not empty zone */
- struct hlist_head __rcu *fz_hash; /* Hash table pointer */
- seqlock_t fz_lock;
- u32 fz_hashmask; /* (fz_divisor - 1) */
-
- u8 fz_order; /* Zone order (0..32) */
- u8 fz_revorder; /* 32 - fz_order */
- __be32 fz_mask; /* inet_make_mask(order) */
-#define FZ_MASK(fz) ((fz)->fz_mask)
-
- struct hlist_head fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
- int fz_nent; /* Number of entries */
- int fz_divisor; /* Hash size (mask+1) */
-};
-
-struct fn_hash {
- struct fn_zone *fn_zones[33];
- struct fn_zone __rcu *fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
- u32 h = ntohl(key) >> fz->fz_revorder;
- h ^= (h>>20);
- h ^= (h>>10);
- h ^= (h>>5);
- h &= fz->fz_hashmask;
- return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
- return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- return kzalloc(size, GFP_KERNEL);
-
- return (struct hlist_head *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
- struct hlist_head *old_ht,
- int old_divisor)
-{
- int i;
-
- for (i = 0; i < old_divisor; i++) {
- struct hlist_node *node, *n;
- struct fib_node *f;
-
- hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head *new_head;
-
- hlist_del_rcu(&f->fn_hash);
-
- new_head = rcu_dereference_protected(fz->fz_hash, 1) +
- fn_hash(f->fn_key, fz);
- hlist_add_head_rcu(&f->fn_hash, new_head);
- }
- }
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- kfree(hash);
- else
- free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
- struct hlist_head *ht, *old_ht;
- int old_divisor, new_divisor;
- u32 new_hashmask;
-
- new_divisor = old_divisor = fz->fz_divisor;
-
- switch (old_divisor) {
- case EMBEDDED_HASH_SIZE:
- new_divisor *= EMBEDDED_HASH_SIZE;
- break;
- case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
- new_divisor *= (EMBEDDED_HASH_SIZE/2);
- break;
- default:
- if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
- printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
- return;
- }
- new_divisor = (old_divisor << 1);
- break;
- }
-
- new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
- printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
- fz->fz_order, old_divisor);
-#endif
-
- ht = fz_hash_alloc(new_divisor);
-
- if (ht) {
- struct fn_zone nfz;
-
- memcpy(&nfz, fz, sizeof(nfz));
-
- write_seqlock_bh(&fz->fz_lock);
- old_ht = rcu_dereference_protected(fz->fz_hash, 1);
- RCU_INIT_POINTER(nfz.fz_hash, ht);
- nfz.fz_hashmask = new_hashmask;
- nfz.fz_divisor = new_divisor;
- fn_rebuild_zone(&nfz, old_ht, old_divisor);
- fib_hash_genid++;
- rcu_assign_pointer(fz->fz_hash, ht);
- fz->fz_hashmask = new_hashmask;
- fz->fz_divisor = new_divisor;
- write_sequnlock_bh(&fz->fz_lock);
-
- if (old_ht != fz->fz_embedded_hash) {
- synchronize_rcu();
- fz_hash_free(old_ht, old_divisor);
- }
- }
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
- struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
- kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
- call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
- struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
- kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
- fib_release_info(fa->fa_info);
- if (fa == &f->fn_embedded_alias)
- fa->fa_info = NULL;
- else
- call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
- int i;
- struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
- if (!fz)
- return NULL;
-
- seqlock_init(&fz->fz_lock);
- fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
- fz->fz_hashmask = fz->fz_divisor - 1;
- RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
- fz->fz_order = z;
- fz->fz_revorder = 32 - z;
- fz->fz_mask = inet_make_mask(z);
-
- /* Find the first not empty zone with more specific mask */
- for (i = z + 1; i <= 32; i++)
- if (table->fn_zones[i])
- break;
- if (i > 32) {
- /* No more specific masks, we are the first. */
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zone_list));
- rcu_assign_pointer(table->fn_zone_list, fz);
- } else {
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zones[i]->fz_next));
- rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
- }
- table->fn_zones[z] = fz;
- fib_hash_genid++;
- return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res,
- int fib_flags)
-{
- int err;
- struct fn_zone *fz;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
- rcu_read_lock();
- for (fz = rcu_dereference(t->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head *head;
- struct hlist_node *node;
- struct fib_node *f;
- __be32 k;
- unsigned int seq;
-
- do {
- seq = read_seqbegin(&fz->fz_lock);
- k = fz_key(flp->fl4_dst, fz);
-
- head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key != k)
- continue;
-
- err = fib_semantic_match(&f->fn_alias,
- flp, res,
- fz->fz_order, fib_flags);
- if (err <= 0)
- goto out;
- }
- } while (read_seqretry(&fz->fz_lock, seq));
- }
- err = 1;
-out:
- rcu_read_unlock();
- return err;
-}
-
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res)
-{
- int order, last_idx;
- struct hlist_node *node;
- struct fib_node *f;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
- struct fn_zone *fz = t->fn_zones[0];
- struct hlist_head *head;
-
- if (fz == NULL)
- return;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
- head = rcu_dereference(fz->fz_hash);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- }
-
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
-
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
- hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
- struct hlist_node *node;
- struct fib_node *f;
-
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key == key)
- return f;
- }
-
- return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
- struct fib_alias *fa = &f->fn_embedded_alias;
-
- if (fa->fa_info != NULL)
- fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
- return fa;
-}
-
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fib_node *new_f = NULL;
- struct fib_node *f;
- struct fib_alias *fa, *new_fa;
- struct fn_zone *fz;
- struct fib_info *fi;
- u8 tos = cfg->fc_tos;
- __be32 key;
- int err;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- fz = table->fn_zones[cfg->fc_dst_len];
- if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
- return -ENOBUFS;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- fi = fib_create_info(cfg);
- if (IS_ERR(fi))
- return PTR_ERR(fi);
-
- if (fz->fz_nent > (fz->fz_divisor<<1) &&
- fz->fz_divisor < FZ_MAX_DIVISOR &&
- (cfg->fc_dst_len == 32 ||
- (1 << cfg->fc_dst_len) > fz->fz_divisor))
- fn_rehash_zone(fz);
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
- /* Now fa, if non-NULL, points to the first fib alias
- * with the same keys [prefix,tos,priority], if such key already
- * exists or to the node before which we will insert new one.
- *
- * If fa is NULL, we will need to allocate a new one and
- * insert to the head of f.
- *
- * If f is NULL, no fib node matched the destination key
- * and we need to allocate a new one of those as well.
- */
-
- if (fa && fa->fa_tos == tos &&
- fa->fa_info->fib_priority == fi->fib_priority) {
- struct fib_alias *fa_first, *fa_match;
-
- err = -EEXIST;
- if (cfg->fc_nlflags & NLM_F_EXCL)
- goto out;
-
- /* We have 2 goals:
- * 1. Find exact match for type, scope, fib_info to avoid
- * duplicate routes
- * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
- */
- fa_match = NULL;
- fa_first = fa;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- if (fa->fa_tos != tos)
- break;
- if (fa->fa_info->fib_priority != fi->fib_priority)
- break;
- if (fa->fa_type == cfg->fc_type &&
- fa->fa_scope == cfg->fc_scope &&
- fa->fa_info == fi) {
- fa_match = fa;
- break;
- }
- }
-
- if (cfg->fc_nlflags & NLM_F_REPLACE) {
- u8 state;
-
- fa = fa_first;
- if (fa_match) {
- if (fa == fa_match)
- err = 0;
- goto out;
- }
- err = -ENOBUFS;
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_tos = fa->fa_tos;
- new_fa->fa_info = fi;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- state = fa->fa_state;
- new_fa->fa_state = state & ~FA_S_ACCESSED;
- fib_hash_genid++;
- list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
- fn_free_alias(fa, f);
- if (state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
- return 0;
- }
-
- /* Error if we find a perfect match which
- * uses the same scope, type, and nexthop
- * information.
- */
- if (fa_match)
- goto out;
-
- if (!(cfg->fc_nlflags & NLM_F_APPEND))
- fa = fa_first;
- }
-
- err = -ENOENT;
- if (!(cfg->fc_nlflags & NLM_F_CREATE))
- goto out;
-
- err = -ENOBUFS;
-
- if (!f) {
- new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
- if (new_f == NULL)
- goto out;
-
- INIT_HLIST_NODE(&new_f->fn_hash);
- INIT_LIST_HEAD(&new_f->fn_alias);
- new_f->fn_key = key;
- f = new_f;
- }
-
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_info = fi;
- new_fa->fa_tos = tos;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- new_fa->fa_state = 0;
-
- /*
- * Insert new entry to the list.
- */
-
- if (new_f)
- fib_insert_node(fz, new_f);
- list_add_tail_rcu(&new_fa->fa_list,
- (fa ? &fa->fa_list : &f->fn_alias));
- fib_hash_genid++;
-
- if (new_f)
- fz->fz_nent++;
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
- &cfg->fc_nlinfo, 0);
- return 0;
-
-out:
- if (new_f)
- kmem_cache_free(fn_hash_kmem, new_f);
- fib_release_info(fi);
- return err;
-}
-
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
- struct fib_node *f;
- struct fib_alias *fa, *fa_to_delete;
- struct fn_zone *fz;
- __be32 key;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- if ((fz = table->fn_zones[cfg->fc_dst_len]) == NULL)
- return -ESRCH;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
- if (!fa)
- return -ESRCH;
-
- fa_to_delete = NULL;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fa->fa_tos != cfg->fc_tos)
- break;
-
- if ((!cfg->fc_type ||
- fa->fa_type == cfg->fc_type) &&
- (cfg->fc_scope == RT_SCOPE_NOWHERE ||
- fa->fa_scope == cfg->fc_scope) &&
- (!cfg->fc_protocol ||
- fi->fib_protocol == cfg->fc_protocol) &&
- fib_nh_match(cfg, fi) == 0) {
- fa_to_delete = fa;
- break;
- }
- }
-
- if (fa_to_delete) {
- int kill_fn;
-
- fa = fa_to_delete;
- rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, 0);
-
- kill_fn = 0;
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_fn = 1;
- }
- fib_hash_genid++;
-
- if (fa->fa_state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- fn_free_alias(fa, f);
- if (kill_fn) {
- fn_free_node(f);
- fz->fz_nent--;
- }
-
- return 0;
- }
- return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
- struct hlist_node *node, *n;
- struct fib_node *f;
- int found = 0;
-
- hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
- struct fib_alias *fa, *fa_node;
- int kill_f;
-
- kill_f = 0;
- list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_f = 1;
- }
- fib_hash_genid++;
-
- fn_free_alias(fa, f);
- found++;
- }
- }
- if (kill_f) {
- fn_free_node(f);
- fz->fz_nent--;
- }
- }
- return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz;
- int found = 0;
-
- for (fz = rtnl_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rtnl_dereference(fz->fz_next)) {
- int i;
-
- for (i = fz->fz_divisor - 1; i >= 0; i--)
- found += fn_flush_list(fz, i);
- }
- return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz, *next;
-
- next = table->fn_zone_list;
- while (next != NULL) {
- fz = next;
- next = fz->fz_next;
-
- if (fz->fz_hash != fz->fz_embedded_hash)
- fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
- kfree(fz);
- }
-
- kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz,
- struct hlist_head *head)
-{
- struct hlist_node *node;
- struct fib_node *f;
- int i, s_i;
-
- s_i = cb->args[4];
- i = 0;
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- if (i < s_i)
- goto next;
-
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- fa->fa_scope,
- f->fn_key,
- fz->fz_order,
- fa->fa_tos,
- fa->fa_info,
- NLM_F_MULTI) < 0) {
- cb->args[4] = i;
- return -1;
- }
-next:
- i++;
- }
- }
- cb->args[4] = i;
- return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz)
-{
- int h, s_h;
- struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
- if (head == NULL)
- return skb->len;
- s_h = cb->args[3];
- for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(head + h))
- continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
- cb->args[3] = h;
- return -1;
- }
- memset(&cb->args[4], 0,
- sizeof(cb->args) - 4*sizeof(cb->args[0]));
- }
- cb->args[3] = h;
- return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int m = 0, s_m;
- struct fn_zone *fz;
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
- s_m = cb->args[2];
- rcu_read_lock();
- for (fz = rcu_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next), m++) {
- if (m < s_m)
- continue;
- if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
- cb->args[2] = m;
- rcu_read_unlock();
- return -1;
- }
- memset(&cb->args[3], 0,
- sizeof(cb->args) - 3*sizeof(cb->args[0]));
- }
- rcu_read_unlock();
- cb->args[2] = m;
- return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
- fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
- 0, SLAB_PANIC, NULL);
-
- fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
- 0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
- struct fib_table *tb;
-
- tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
- GFP_KERNEL);
- if (tb == NULL)
- return NULL;
-
- tb->tb_id = id;
- tb->tb_default = -1;
-
- memset(tb->tb_data, 0, sizeof(struct fn_hash));
- return tb;
-}
-
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
- struct seq_net_private p;
- struct fn_zone *zone;
- int bucket;
- struct hlist_head *hash_head;
- struct fib_node *fn;
- struct fib_alias *fa;
- loff_t pos;
- unsigned int genid;
- int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_table *main_table;
- struct fn_hash *table;
-
- main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
- table = (struct fn_hash *)main_table->tb_data;
-
- iter->bucket = 0;
- iter->hash_head = NULL;
- iter->fn = NULL;
- iter->fa = NULL;
- iter->pos = 0;
- iter->genid = fib_hash_genid;
- iter->valid = 1;
-
- for (iter->zone = rcu_dereference(table->fn_zone_list);
- iter->zone != NULL;
- iter->zone = rcu_dereference(iter->zone->fz_next)) {
- int maxslot;
-
- if (!iter->zone->fz_nent)
- continue;
-
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
- maxslot = iter->zone->fz_divisor;
-
- for (iter->bucket = 0; iter->bucket < maxslot;
- ++iter->bucket, ++iter->hash_head) {
- struct hlist_node *node;
- struct fib_node *fn;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
- }
-out:
- return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_node *fn;
- struct fib_alias *fa;
-
- /* Advance FA, if any. */
- fn = iter->fn;
- fa = iter->fa;
- if (fa) {
- BUG_ON(!fn);
- list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
-
- fa = iter->fa = NULL;
-
- /* Advance FN. */
- if (fn) {
- struct hlist_node *node = &fn->fn_hash;
- hlist_for_each_entry_continue(fn, node, fn_hash) {
- iter->fn = fn;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- fn = iter->fn = NULL;
-
- /* Advance hash chain. */
- if (!iter->zone)
- goto out;
-
- for (;;) {
- struct hlist_node *node;
- int maxslot;
-
- maxslot = iter->zone->fz_divisor;
-
- while (++iter->bucket < maxslot) {
- iter->hash_head++;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- iter->zone = rcu_dereference(iter->zone->fz_next);
-
- if (!iter->zone)
- goto out;
-
- iter->bucket = 0;
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-out:
- iter->pos++;
- return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_alias *fa;
-
- if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
- fa = iter->fa;
- pos -= iter->pos;
- } else
- fa = fib_get_first(seq);
-
- if (fa)
- while (pos && (fa = fib_get_next(seq)))
- --pos;
- return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- void *v = NULL;
-
- rcu_read_lock();
- if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
- v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
- return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- ++*pos;
- return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
- static const unsigned type2flags[RTN_MAX + 1] = {
- [7] = RTF_REJECT,
- [8] = RTF_REJECT,
- };
- unsigned flags = type2flags[type];
-
- if (fi && fi->fib_nh->nh_gw)
- flags |= RTF_GATEWAY;
- if (mask == htonl(0xFFFFFFFF))
- flags |= RTF_HOST;
- flags |= RTF_UP;
- return flags;
-}
-
-/*
- * This outputs /proc/net/route.
- *
- * It always works in backward compatibility mode.
- * The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
- struct fib_iter_state *iter;
- int len;
- __be32 prefix, mask;
- unsigned flags;
- struct fib_node *f;
- struct fib_alias *fa;
- struct fib_info *fi;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
- "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
- "\tWindow\tIRTT");
- goto out;
- }
-
- iter = seq->private;
- f = iter->fn;
- fa = iter->fa;
- fi = fa->fa_info;
- prefix = f->fn_key;
- mask = FZ_MASK(iter->zone);
- flags = fib_flag_trans(fa->fa_type, mask, fi);
- if (fi)
- seq_printf(seq,
- "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- fi->fib_dev ? fi->fib_dev->name : "*", prefix,
- fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
- mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
- fi->fib_window,
- fi->fib_rtt >> 3, &len);
- else
- seq_printf(seq,
- "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
- seq_printf(seq, "%*s\n", 127 - len, "");
-out:
- return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
- .start = fib_seq_start,
- .next = fib_seq_next,
- .stop = fib_seq_stop,
- .show = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &fib_seq_ops,
- sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
- .owner = THIS_MODULE,
- .open = fib_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
- if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
- return -ENOMEM;
- return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
- proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c079cc0ec651..d5c40d8f6632 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -25,7 +25,7 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
}
/* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
+extern int fib_semantic_match(struct fib_table *tb, struct list_head *head,
const struct flowi *flp,
struct fib_result *res, int prefixlen, int fib_flags);
extern void fib_release_info(struct fib_info *);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 7981a24f5c7b..9cefe72029cf 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -41,12 +41,12 @@ struct fib4_rule {
__be32 srcmask;
__be32 dst;
__be32 dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
u32 tclassid;
#endif
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
u32 fib_rules_tclass(struct fib_result *res)
{
return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
@@ -165,7 +165,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (frh->dst_len)
rule4->dst = nla_get_be32(tb[FRA_DST]);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW])
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
#endif
@@ -195,7 +195,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->tos && (rule4->tos != frh->tos))
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
return 0;
#endif
@@ -224,7 +224,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
if (rule4->src_len)
NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rule4->tclassid)
NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3df1b7..146bd82ef60d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
@@ -152,6 +152,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics)
+ kfree(fi->fib_metrics);
kfree(fi);
}
@@ -200,7 +202,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight != onh->nh_weight ||
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +223,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
unsigned int val = fi->fib_nhs;
val ^= fi->fib_protocol;
@@ -422,7 +424,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
@@ -476,7 +478,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla && nla_get_be32(nla) != nh->nh_gw)
return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
return 1;
@@ -613,14 +615,14 @@ out:
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
return ((__force u32)val ^
((__force u32)val >> 7) ^
((__force u32)val >> 14)) & mask;
}
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
{
if (bytes <= PAGE_SIZE)
return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +632,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
get_order(bytes));
}
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
if (!hash)
return;
@@ -641,18 +643,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
free_pages((unsigned long) hash, get_order(bytes));
}
-static void fib_hash_move(struct hlist_head *new_info_hash,
- struct hlist_head *new_laddrhash,
- unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+ struct hlist_head *new_laddrhash,
+ unsigned int new_size)
{
struct hlist_head *old_info_hash, *old_laddrhash;
- unsigned int old_size = fib_hash_size;
+ unsigned int old_size = fib_info_hash_size;
unsigned int i, bytes;
spin_lock_bh(&fib_info_lock);
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
- fib_hash_size = new_size;
+ fib_info_hash_size = new_size;
for (i = 0; i < old_size; i++) {
struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +695,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
spin_unlock_bh(&fib_info_lock);
bytes = old_size * sizeof(struct hlist_head *);
- fib_hash_free(old_info_hash, bytes);
- fib_hash_free(old_laddrhash, bytes);
+ fib_info_hash_free(old_info_hash, bytes);
+ fib_info_hash_free(old_laddrhash, bytes);
}
struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -718,8 +720,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
#endif
err = -ENOBUFS;
- if (fib_info_cnt >= fib_hash_size) {
- unsigned int new_size = fib_hash_size << 1;
+ if (fib_info_cnt >= fib_info_hash_size) {
+ unsigned int new_size = fib_info_hash_size << 1;
struct hlist_head *new_info_hash;
struct hlist_head *new_laddrhash;
unsigned int bytes;
@@ -727,21 +729,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (!new_size)
new_size = 1;
bytes = new_size * sizeof(struct hlist_head *);
- new_info_hash = fib_hash_alloc(bytes);
- new_laddrhash = fib_hash_alloc(bytes);
+ new_info_hash = fib_info_hash_alloc(bytes);
+ new_laddrhash = fib_info_hash_alloc(bytes);
if (!new_info_hash || !new_laddrhash) {
- fib_hash_free(new_info_hash, bytes);
- fib_hash_free(new_laddrhash, bytes);
+ fib_info_hash_free(new_info_hash, bytes);
+ fib_info_hash_free(new_laddrhash, bytes);
} else
- fib_hash_move(new_info_hash, new_laddrhash, new_size);
+ fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
- if (!fib_hash_size)
+ if (!fib_info_hash_size)
goto failure;
}
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (fi == NULL)
goto failure;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
fib_info_cnt++;
fi->fib_net = hold_net(net);
@@ -779,7 +787,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto err_inval;
if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
goto err_inval;
#endif
@@ -792,7 +800,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_oif = cfg->fc_oif;
nh->nh_gw = cfg->fc_gw;
nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -881,8 +889,9 @@ failure:
}
/* Note! fib_semantic_match intentionally uses RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
- struct fib_result *res, int prefixlen, int fib_flags)
+int fib_semantic_match(struct fib_table *tb, struct list_head *head,
+ const struct flowi *flp, struct fib_result *res,
+ int prefixlen, int fib_flags)
{
struct fib_alias *fa;
int nh_sel = 0;
@@ -946,6 +955,8 @@ out_fill_res:
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->fi = fa->fa_info;
+ res->table = tb;
+ res->fa_head = head;
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&res->fi->fib_clntref);
return 0;
@@ -1002,7 +1013,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (fi->fib_nh->nh_oif)
NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (fi->fib_nh[0].nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
@@ -1027,7 +1038,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (nh->nh_gw)
NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
@@ -1125,6 +1136,62 @@ int fib_sync_down_dev(struct net_device *dev, int force)
return ret;
}
+/* Must be invoked inside of an RCU protected region. */
+void fib_select_default(struct fib_result *res)
+{
+ struct fib_info *fi = NULL, *last_resort = NULL;
+ struct list_head *fa_head = res->fa_head;
+ struct fib_table *tb = res->table;
+ int order = -1, last_idx = -1;
+ struct fib_alias *fa;
+
+ list_for_each_entry_rcu(fa, fa_head, fa_list) {
+ struct fib_info *next_fi = fa->fa_info;
+
+ if (fa->fa_scope != res->scope ||
+ fa->fa_type != RTN_UNICAST)
+ continue;
+
+ if (next_fi->fib_priority > res->fi->fib_priority)
+ break;
+ if (!next_fi->fib_nh[0].nh_gw ||
+ next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+ continue;
+
+ fib_alias_accessed(fa);
+
+ if (fi == NULL) {
+ if (next_fi != res->fi)
+ break;
+ } else if (!fib_detect_death(fi, order, &last_resort,
+ &last_idx, tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+ fi = next_fi;
+ order++;
+ }
+
+ if (order <= 0 || fi == NULL) {
+ tb->tb_default = -1;
+ goto out;
+ }
+
+ if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+ tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+
+ if (last_idx >= 0)
+ fib_result_assign(res, last_resort);
+ tb->tb_default = last_idx;
+out:
+ rcu_read_unlock();
+}
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0f280348e0fd..1eae90b054eb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -95,7 +95,7 @@ typedef unsigned int t_key;
#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
-struct node {
+struct rt_trie_node {
unsigned long parent;
t_key key;
};
@@ -126,7 +126,7 @@ struct tnode {
struct work_struct work;
struct tnode *tnode_free;
};
- struct node *child[0];
+ struct rt_trie_node *child[0];
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@ struct trie_stat {
};
struct trie {
- struct node *trie;
+ struct rt_trie_node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats stats;
#endif
};
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@ static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
{
return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
}
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
{
struct tnode *ret = node_parent(node);
@@ -192,22 +192,22 @@ static inline struct tnode *node_parent_rcu(struct node *node)
/* Same as rcu_assign_pointer
* but that macro() assumes that value is a pointer.
*/
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
{
smp_wmb();
node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
{
BUG_ON(i >= 1U << tn->bits);
return tn->child[i];
}
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
- struct node *ret = tnode_get_child(tn, i);
+ struct rt_trie_node *ret = tnode_get_child(tn, i);
return rcu_dereference_rtnl(ret);
}
@@ -378,7 +378,7 @@ static void __tnode_free_rcu(struct rcu_head *head)
{
struct tnode *tn = container_of(head, struct tnode, rcu);
size_t size = sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
if (size <= PAGE_SIZE)
kfree(tn);
@@ -402,7 +402,7 @@ static void tnode_free_safe(struct tnode *tn)
tn->tnode_free = tnode_free_head;
tnode_free_head = tn;
tnode_free_size += sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
}
static void tnode_free_flush(void)
@@ -443,7 +443,7 @@ static struct leaf_info *leaf_info_new(int plen)
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
- size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+ size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
struct tnode *tn = tnode_alloc(sz);
if (tn) {
@@ -456,7 +456,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct node) << bits);
+ sizeof(struct rt_trie_node) << bits);
return tn;
}
@@ -465,7 +465,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
{
if (n == NULL || IS_LEAF(n))
return 0;
@@ -474,7 +474,7 @@ static inline int tnode_full(const struct tnode *tn, const struct node *n)
}
static inline void put_child(struct trie *t, struct tnode *tn, int i,
- struct node *n)
+ struct rt_trie_node *n)
{
tnode_put_child_reorg(tn, i, n, -1);
}
@@ -484,10 +484,10 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
* Update the value of full_children and empty_children.
*/
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull)
{
- struct node *chi = tn->child[i];
+ struct rt_trie_node *chi = tn->child[i];
int isfull;
BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
}
#define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
{
int i;
struct tnode *old_tn;
@@ -605,7 +605,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!node_parent((struct node *)tn)) {
+ if (!node_parent((struct rt_trie_node *)tn)) {
inflate_threshold_use = inflate_threshold_root;
halve_threshold_use = halve_threshold_root;
} else {
@@ -635,7 +635,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Return if at least one inflate is run */
if (max_work != MAX_WORK)
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
/*
* Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
if (tn->empty_children == tnode_child_length(tn) - 1) {
one_child:
for (i = 0; i < tnode_child_length(tn); i++) {
- struct node *n;
+ struct rt_trie_node *n;
n = tn->child[i];
if (!n)
@@ -676,7 +676,7 @@ one_child:
return n;
}
}
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
}
static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
goto nomem;
}
- put_child(t, tn, 2*i, (struct node *) left);
- put_child(t, tn, 2*i+1, (struct node *) right);
+ put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+ put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
}
}
for (i = 0; i < olen; i++) {
struct tnode *inode;
- struct node *node = tnode_get_child(oldtnode, i);
+ struct rt_trie_node *node = tnode_get_child(oldtnode, i);
struct tnode *left, *right;
int size, j;
@@ -825,7 +825,7 @@ nomem:
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
struct tnode *oldtnode = tn;
- struct node *left, *right;
+ struct rt_trie_node *left, *right;
int i;
int olen = tnode_child_length(tn);
@@ -856,7 +856,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (!newn)
goto nomem;
- put_child(t, tn, i/2, (struct node *)newn);
+ put_child(t, tn, i/2, (struct rt_trie_node *)newn);
}
}
@@ -958,7 +958,7 @@ fib_find_node(struct trie *t, u32 key)
{
int pos;
struct tnode *tn;
- struct node *n;
+ struct rt_trie_node *n;
pos = 0;
n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
key = tn->key;
- while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+ while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
tn = (struct tnode *) resize(t, (struct tnode *)tn);
tnode_put_child_reorg((struct tnode *)tp, cindex,
- (struct node *)tn, wasfull);
+ (struct rt_trie_node *)tn, wasfull);
- tp = node_parent((struct node *) tn);
+ tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
if (!tp)
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}
@@ -1025,7 +1025,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
int pos, newpos;
struct tnode *tp = NULL, *tn = NULL;
- struct node *n;
+ struct rt_trie_node *n;
struct leaf *l;
int missbit;
struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
if (t->trie && n == NULL) {
/* Case 2: n is NULL, and will just insert a new leaf */
- node_set_parent((struct node *)l, tp);
+ node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+ put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1141,18 +1141,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
return NULL;
}
- node_set_parent((struct node *)tn, tp);
+ node_set_parent((struct rt_trie_node *)tn, tp);
missbit = tkey_extract_bits(key, newpos, 1);
- put_child(t, tn, missbit, (struct node *)l);
+ put_child(t, tn, missbit, (struct rt_trie_node *)l);
put_child(t, tn, 1-missbit, n);
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
put_child(t, (struct tnode *)tp, cindex,
- (struct node *)tn);
+ (struct rt_trie_node *)tn);
} else {
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}
@@ -1340,7 +1340,7 @@ err:
}
/* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
t_key key, const struct flowi *flp,
struct fib_result *res, int fib_flags)
{
@@ -1356,7 +1356,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
if (l->key != (key & ntohl(mask)))
continue;
- err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+ err = fib_semantic_match(tb, &li->falh, flp, res, plen, fib_flags);
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (err <= 0)
@@ -1376,7 +1376,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
{
struct trie *t = (struct trie *) tb->tb_data;
int ret;
- struct node *n;
+ struct rt_trie_node *n;
struct tnode *pn;
int pos, bits;
t_key key = ntohl(flp->fl4_dst);
@@ -1398,7 +1398,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
/* Just a leaf? */
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
goto found;
}
@@ -1423,7 +1423,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
}
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
if (ret > 0)
goto backtrace;
goto found;
@@ -1541,7 +1541,7 @@ backtrace:
if (chopped_off <= pn->bits) {
cindex &= ~(1 << (chopped_off-1));
} else {
- struct tnode *parent = node_parent_rcu((struct node *) pn);
+ struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
if (!parent)
goto failed;
@@ -1568,7 +1568,7 @@ found:
*/
static void trie_leaf_remove(struct trie *t, struct leaf *l)
{
- struct tnode *tp = node_parent((struct node *) l);
+ struct tnode *tp = node_parent((struct rt_trie_node *) l);
pr_debug("entering trie_leaf_remove(%p)\n", l);
@@ -1706,7 +1706,7 @@ static int trie_flush_leaf(struct leaf *l)
* Scan for the next right leaf starting at node p->child[idx]
* Since we have back pointer, no recursion necessary.
*/
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
{
do {
t_key idx;
@@ -1732,7 +1732,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
}
/* Node empty, walk back up to parent */
- c = (struct node *) p;
+ c = (struct rt_trie_node *) p;
} while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
@@ -1753,7 +1753,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
static struct leaf *trie_nextleaf(struct leaf *l)
{
- struct node *c = (struct node *) l;
+ struct rt_trie_node *c = (struct rt_trie_node *) l;
struct tnode *p = node_parent_rcu(c);
if (!p)
@@ -1802,80 +1802,6 @@ void fib_free_table(struct fib_table *tb)
kfree(tb);
}
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp,
- struct fib_result *res)
-{
- struct trie *t = (struct trie *) tb->tb_data;
- int order, last_idx;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fib_alias *fa = NULL;
- struct list_head *fa_head;
- struct leaf *l;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
-
- l = fib_find_node(t, 0);
- if (!l)
- goto out;
-
- fa_head = get_fa_head(l, 0);
- if (!fa_head)
- goto out;
-
- if (list_empty(fa_head))
- goto out;
-
- list_for_each_entry_rcu(fa, fa_head, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1916,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
return skb->len;
}
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
{
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
@@ -2003,8 +1929,7 @@ void __init fib_hash_init(void)
}
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
{
struct fib_table *tb;
struct trie *t;
@@ -2036,7 +1961,7 @@ struct fib_trie_iter {
unsigned int depth;
};
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
{
struct tnode *tn = iter->tnode;
unsigned int cindex = iter->index;
@@ -2050,7 +1975,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
iter->tnode, iter->index, iter->depth);
rescan:
while (cindex < (1<<tn->bits)) {
- struct node *n = tnode_get_child_rcu(tn, cindex);
+ struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
if (n) {
if (IS_LEAF(n)) {
@@ -2069,7 +1994,7 @@ rescan:
}
/* Current node exhausted, pop back up */
- p = node_parent_rcu((struct node *)tn);
+ p = node_parent_rcu((struct rt_trie_node *)tn);
if (p) {
cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
tn = p;
@@ -2081,10 +2006,10 @@ rescan:
return NULL;
}
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
- struct node *n;
+ struct rt_trie_node *n;
if (!t)
return NULL;
@@ -2108,7 +2033,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
- struct node *n;
+ struct rt_trie_node *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
@@ -2181,7 +2106,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
- bytes += sizeof(struct node *) * pointers;
+ bytes += sizeof(struct rt_trie_node *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
@@ -2262,7 +2187,7 @@ static const struct file_operations fib_triestat_fops = {
.release = single_release_net,
};
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2275,7 +2200,7 @@ static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
- struct node *n;
+ struct rt_trie_node *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
@@ -2304,7 +2229,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
- struct node *n;
+ struct rt_trie_node *n;
++*pos;
/* next node in same table */
@@ -2390,7 +2315,7 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
- struct node *n = v;
+ struct rt_trie_node *n = v;
if (!node_parent_rcu(n))
fib_table_print(seq, iter->tb);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2746c1fa6417..2ada17129fce 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
nlmsg_len(nlh) < hdrlen)
return -EINVAL;
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
if (nlmsg_attrlen(nlh, hdrlen)) {
struct nlattr *attr;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc85751c74..b6513b13d729 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -475,7 +475,7 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
- struct inet_peer_base *base = family_to_base(AF_INET);
+ struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
/* Look up for the address quickly, lockless.
@@ -512,6 +512,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
atomic_set(&p->rid, 0);
atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
p->tcp_ts_stamp = 0;
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
INIT_LIST_HEAD(&p->unused);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d859bcc26cb7..d7b2b0987a3b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
}
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (unlikely(skb_dst(skb)->tclassid)) {
struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
u32 idx = skb_dst(skb)->tclassid;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9afd73e0..7e41ac0b9260 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
@@ -1434,6 +1435,51 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+ struct in_addr src;
+ struct in_addr grp;
+ compat_ulong_t pktcnt;
+ compat_ulong_t bytecnt;
+ compat_ulong_t wrong_if;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+ struct sioc_sg_req sr;
+ struct mfc_cache *c;
+ struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
+
+ switch (cmd) {
+ case SIOCGETSGCNT:
+ if (copy_from_user(&sr, arg, sizeof(sr)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+ if (c) {
+ sr.pktcnt = c->mfc_un.res.pkt;
+ sr.bytecnt = c->mfc_un.res.bytes;
+ sr.wrong_if = c->mfc_un.res.wrong_if;
+ rcu_read_unlock();
+
+ if (copy_to_user(arg, &sr, sizeof(sr)))
+ return -EFAULT;
+ return 0;
+ }
+ rcu_read_unlock();
+ return -EADDRNOTAVAIL;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
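The compat handler above lets 32-bit userspace query multicast (S,G) counters on a 64-bit kernel. A rough standalone caller for reference, not part of the patch: the addresses are invented, the program needs CAP_NET_RAW, and the interplay between libc and kernel headers may need tweaking on older toolchains. A 32-bit build of this reaches ipmr_compat_ioctl() through the compat_ioctl hook wired up in net/ipv4/raw.c later in this series.

/* Illustrative SIOCGETSGCNT caller; not part of the patch. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <linux/mroute.h>

int main(void)
{
	struct sioc_sg_req sr;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&sr, 0, sizeof(sr));
	inet_aton("192.0.2.1", &sr.src);	/* example source */
	inet_aton("239.1.2.3", &sr.grp);	/* example group  */

	if (ioctl(fd, SIOCGETSGCNT, &sr) == 0)
		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
	else
		perror("SIOCGETSGCNT");
	return 0;
}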
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index babd1a2bae5f..f926a310075d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -206,8 +206,9 @@ config IP_NF_TARGET_REDIRECT
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
- depends on NF_NAT
+ depends on NF_CONNTRACK_SNMP && NF_NAT
depends on NETFILTER_ADVANCED
+ default NF_NAT && NF_CONNTRACK_SNMP
---help---
This module implements an Application Layer Gateway (ALG) for
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e855fffaed95..e95054c690c6 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -866,6 +866,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(NFPROTO_ARP, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1333,6 +1334,7 @@ static int translate_compat_table(const char *name,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
+ xt_compat_init_offsets(NFPROTO_ARP, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 652efea013dc..ef7d7b9680ea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1063,6 +1063,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1664,6 +1665,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
+ xt_compat_init_offsets(AF_INET, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1e26a4897655..403ca57f6011 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -300,13 +300,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
* that the ->target() function isn't called after ->destroy() */
ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL) {
- pr_info("no conntrack!\n");
- /* FIXME: need to drop invalid ones, since replies
- * to outgoing connections of other nodes will be
- * marked as INVALID */
+ if (ct == NULL)
return NF_DROP;
- }
/* special case: ICMP error handling. conntrack distinguishes between
* error messages (RELATED) and information requests (see below) */
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 72ffc8fda2e9..d76d6c9ed946 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -442,8 +442,7 @@ ipt_log_packet(u_int8_t pf,
}
#endif
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, 0);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 294a2a32f293..aef5d1fbe77d 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
- if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+ if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 63f60fc5d26a..5585980fce2e 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -20,6 +20,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
struct ct_iter_state {
struct seq_net_private p;
@@ -35,7 +36,8 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -48,13 +50,14 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
}
return head;
}
@@ -217,7 +220,8 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -230,11 +234,12 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 0f23b3f06df0..703f366fd235 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb,
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
- int ret;
+ int res;
exp->tuple.dst.u.tcp.port = htons(port);
- ret = nf_ct_expect_related(exp);
- if (ret == 0)
+ res = nf_ct_expect_related(exp);
+ if (res == 0)
break;
- else if (ret != -EBUSY) {
+ else if (res != -EBUSY) {
port = 0;
break;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index c04787ce1a71..21bcf471b25a 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -221,7 +221,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
manips not an issue. */
if (maniptype == IP_NAT_MANIP_SRC &&
!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
- if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+ /* try the original tuple first */
+ if (in_range(orig_tuple, range)) {
+ if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ *tuple = *orig_tuple;
+ return;
+ }
+ } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+ range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
return;
@@ -266,7 +273,6 @@ nf_nat_setup_info(struct nf_conn *ct,
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
struct nf_conn_nat *nat;
- int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
/* nat helper or nfctnetlink also setup binding */
nat = nfct_nat(ct);
@@ -306,8 +312,7 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_DST_NAT;
}
- /* Place in source hash if this is the first time. */
- if (have_to_hash) {
+ if (maniptype == IP_NAT_MANIP_SRC) {
unsigned int srchash;
srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@ nf_nat_setup_info(struct nf_conn *ct,
/* It's done. */
if (maniptype == IP_NAT_MANIP_DST)
- set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_DST_NAT_DONE;
else
- set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_SRC_NAT_DONE;
return NF_ACCEPT;
}
@@ -502,7 +507,10 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
int ret = 0;
spin_lock_bh(&nf_nat_lock);
- if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+ if (rcu_dereference_protected(
+ nf_nat_protos[proto->protonum],
+ lockdep_is_held(&nf_nat_lock)
+ ) != &nf_nat_unknown_protocol) {
ret = -EBUSY;
goto out;
}
@@ -532,7 +540,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
if (nat == NULL || nat->ct == NULL)
return;
- NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+ NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
spin_lock_bh(&nf_nat_lock);
hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@ static void nf_nat_move_storage(void *new, void *old)
struct nf_conn_nat *old_nat = old;
struct nf_conn *ct = old_nat->ct;
- if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+ if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
return;
spin_lock_bh(&nf_nat_lock);
- new_nat->ct = ct;
hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
spin_unlock_bh(&nf_nat_lock);
}
@@ -679,8 +686,7 @@ static int __net_init nf_nat_net_init(struct net *net)
{
/* Leave them the same for the moment. */
net->ipv4.nat_htable_size = net->ct.htable_size;
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
@@ -702,8 +708,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
- nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- net->ipv4.nat_htable_size);
+ nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ee5f419d0a56..8812a02078ab 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -54,6 +54,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@ static int __init nf_nat_snmp_basic_init(void)
{
int ret = 0;
- ret = nf_conntrack_helper_register(&snmp_helper);
- if (ret < 0)
- return ret;
+ BUG_ON(nf_nat_snmp_hook != NULL);
+ rcu_assign_pointer(nf_nat_snmp_hook, help);
+
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
- nf_conntrack_helper_unregister(&snmp_helper);
+ rcu_assign_pointer(nf_nat_snmp_hook, NULL);
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab786e81..6390ba299b3d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ case SIOCINQ:
+ return -ENOIOCTLCMD;
+ default:
+#ifdef CONFIG_IP_MROUTE
+ return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+ return -ENOIOCTLCMD;
+#endif
+ }
+}
+#endif
+
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
@@ -860,6 +878,7 @@ struct proto raw_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
+ .compat_ioctl = compat_raw_ioctl,
#endif
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 351dc4e85242..242a3de83fbb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -152,6 +152,41 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
{
}
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+
+ peer = rt->peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ } else {
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
+ }
+ }
+ return p;
+}
+
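ipv4_cow_metrics() is a copy-on-write step for route metrics: bind an inet_peer, copy the shared read-only array into the peer's storage, then try to publish that pointer with cmpxchg(); a loser of the race re-reads the winner's value and may only use it if it is already writable. A compact restatement of that publish-once pattern in C11 atomics, purely illustrative and with made-up names:

/* Illustrative only: the install-once/losers-re-read pattern used by
 * ipv4_cow_metrics() above, restated with C11 atomics.  All names here
 * are hypothetical; they are not kernel APIs.
 */
#include <stdatomic.h>

#define METRICS_READ_ONLY 0x1UL		/* stands in for DST_METRICS_READ_ONLY */

/* Try to replace the read-only metrics word with a writable array.
 * Exactly one caller wins the compare-and-swap; everyone else re-reads
 * the current value and may only use it if it is already writable.
 */
static unsigned long metrics_install(_Atomic unsigned long *slot,
				     unsigned long old_ro,
				     unsigned long new_rw)
{
	unsigned long expected = old_ro;

	if (atomic_compare_exchange_strong(slot, &expected, new_rw))
		return new_rw;			/* we won: use our copy */

	/* somebody else swapped first; their value is in 'expected' */
	return (expected & METRICS_READ_ONLY) ? 0 : expected;
}

The same pattern reappears in ipv6_cow_metrics() further down in this series.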
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
@@ -159,6 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.default_mtu = ipv4_default_mtu,
+ .cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
@@ -514,7 +550,7 @@ static const struct file_operations rt_cpu_seq_fops = {
.release = seq_release,
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
@@ -567,14 +603,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
if (!pde)
goto err2;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
@@ -588,7 +624,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
@@ -1441,6 +1477,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
+ if (rt->fi)
+ atomic_inc(&rt->fi->fib_clntref);
if (arp_bind_neighbour(&rt->dst) ||
!(rt->dst.neighbour->nud_state &
@@ -1720,6 +1758,10 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
struct rtable *rt = (struct rtable *) dst;
struct inet_peer *peer = rt->peer;
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
if (peer) {
rt->peer = NULL;
inet_putpeer(peer);
@@ -1775,7 +1817,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
memcpy(addr, &src, 4);
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,6 +1857,30 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
return mtu;
}
+static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
+{
+ if (!(rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)) {
+ no_cow:
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
+ }
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ } else {
+ struct inet_peer *peer;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+ if (!peer)
+ goto no_cow;
+ if (inet_metrics_new(peer))
+ memcpy(peer->metrics, fi->fib_metrics,
+ sizeof(u32) * RTAX_MAX);
+ dst_init_metrics(&rt->dst, peer->metrics, false);
+ }
+}
+
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
struct dst_entry *dst = &rt->dst;
@@ -1824,8 +1890,8 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
- dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+ rt_init_metrics(rt, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
}
@@ -1835,7 +1901,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, fib_rules_tclass(res));
#endif
@@ -1891,7 +1957,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -2208,7 +2274,7 @@ local_input:
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -2645,7 +2711,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
else
#endif
if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(net, &fl, &res);
+ fib_select_default(&res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2752,6 +2818,9 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
rt->peer = ort->peer;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
+ rt->fi = ort->fi;
+ if (rt->fi)
+ atomic_inc(&rt->fi->fib_clntref);
dst_free(new);
}
@@ -2828,7 +2897,7 @@ static int rt_fill_info(struct net *net,
}
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid)
NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
@@ -3249,9 +3318,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
@@ -3267,7 +3336,7 @@ int __init ip_rt_init(void)
{
int rc = 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6c11eece262c..f9867d2dbef4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2549b29b062d..2f692cefd3b0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -817,7 +817,7 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
if (!cwnd)
- cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+ cwnd = TCP_INIT_CWND;
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
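For scale, and not part of the patch itself: the replaced rfc3390_bytes_to_packets() call sized the initial window from the RFC 3390 byte formula min(4*MSS, max(2*MSS, 4380)), which works out to 2, 3 or 4 segments, whereas TCP_INIT_CWND is a flat 10 segments in the commit that introduces the macro. A hedged restatement of the old sizing rule for comparison:

/* Restatement of the RFC 3390 sizing the old code used; an illustration,
 * not the kernel's rfc3390_bytes_to_packets() itself.
 */
static unsigned int rfc3390_initial_packets(unsigned int mss)
{
	if (mss <= 1095)	/* 4 * MSS stays at or under 4380 bytes */
		return 4;
	if (mss > 2190)		/* 2 * MSS already exceeds 4380 bytes */
		return 2;
	return 3;		/* e.g. a 1460-byte MSS gives 3 segments */
}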
@@ -4399,7 +4399,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
- eaten = (chunk == skb->len && !th->fin);
+ eaten = (chunk == skb->len);
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 856f68466d49..02f583b3744a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,7 +1994,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
}
req = req->dl_next;
}
- st->offset = 0;
if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
break;
get_req:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8157b17959ee..d37baaa1dbe3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2199,7 +2199,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
return 0;
}
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b057d40addec..19fbdec6baaa 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -196,8 +196,11 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ dst_destroy_metrics_generic(dst);
+
if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
+
xfrm_dst_destroy(xdst);
}
@@ -215,6 +218,7 @@ static struct dst_ops xfrm4_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c97c2fc..fd6782e3a038 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -420,9 +420,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
- printk(KERN_INFO
- "%s: Disabled Privacy Extensions\n",
- dev->name);
ndev->cnf.use_tempaddr = -1;
} else {
in6_dev_hold(ndev);
@@ -2664,14 +2661,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct net *net = dev_net(dev);
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
- LIST_HEAD(keep_list);
- int state;
+ int state, i;
ASSERT_RTNL();
- /* Flush routes if device is being removed or it is not loopback */
- if (how || !(dev->flags & IFF_LOOPBACK))
- rt6_ifdown(net, dev);
+ rt6_ifdown(net, dev);
+ neigh_ifdown(&nd_tbl, dev);
idev = __in6_dev_get(dev);
if (idev == NULL)
@@ -2692,6 +2687,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
}
+ /* Step 2: clear hash table */
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+ struct hlist_head *h = &inet6_addr_lst[i];
+ struct hlist_node *n;
+
+ spin_lock_bh(&addrconf_hash_lock);
+ restart:
+ hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+ if (ifa->idev == idev) {
+ hlist_del_init_rcu(&ifa->addr_lst);
+ addrconf_del_timer(ifa);
+ goto restart;
+ }
+ }
+ spin_unlock_bh(&addrconf_hash_lock);
+ }
+
write_lock_bh(&idev->lock);
/* Step 2: clear flags for stateless addrconf */
@@ -2725,52 +2737,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct inet6_ifaddr, if_list);
addrconf_del_timer(ifa);
- /* If just doing link down, and address is permanent
- and not link-local, then retain it. */
- if (!how &&
- (ifa->flags&IFA_F_PERMANENT) &&
- !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
- list_move_tail(&ifa->if_list, &keep_list);
-
- /* If not doing DAD on this address, just keep it. */
- if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
- idev->cnf.accept_dad <= 0 ||
- (ifa->flags & IFA_F_NODAD))
- continue;
+ list_del(&ifa->if_list);
- /* If it was tentative already, no need to notify */
- if (ifa->flags & IFA_F_TENTATIVE)
- continue;
+ write_unlock_bh(&idev->lock);
- /* Flag it for later restoration when link comes up */
- ifa->flags |= IFA_F_TENTATIVE;
- ifa->state = INET6_IFADDR_STATE_DAD;
- } else {
- list_del(&ifa->if_list);
-
- /* clear hash table */
- spin_lock_bh(&addrconf_hash_lock);
- hlist_del_init_rcu(&ifa->addr_lst);
- spin_unlock_bh(&addrconf_hash_lock);
-
- write_unlock_bh(&idev->lock);
- spin_lock_bh(&ifa->state_lock);
- state = ifa->state;
- ifa->state = INET6_IFADDR_STATE_DEAD;
- spin_unlock_bh(&ifa->state_lock);
-
- if (state != INET6_IFADDR_STATE_DEAD) {
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain,
- NETDEV_DOWN, ifa);
- }
+ spin_lock_bh(&ifa->state_lock);
+ state = ifa->state;
+ ifa->state = INET6_IFADDR_STATE_DEAD;
+ spin_unlock_bh(&ifa->state_lock);
- in6_ifa_put(ifa);
- write_lock_bh(&idev->lock);
+ if (state != INET6_IFADDR_STATE_DEAD) {
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
}
- }
+ in6_ifa_put(ifa);
- list_splice(&keep_list, &idev->addr_list);
+ write_lock_bh(&idev->lock);
+ }
write_unlock_bh(&idev->lock);
@@ -4159,8 +4142,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->dst);
- if (ifp->state == INET6_IFADDR_STATE_DEAD &&
- ip6_del_rt(ifp->rt))
+ if (ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->dst);
break;
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 978e80e2c4a8..3194aa909872 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -772,7 +772,7 @@ out:
return err;
}
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7d227c644f72..47b7b8df7fac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1076,6 +1076,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET6, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1679,6 +1680,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET6);
+ xt_compat_init_offsets(AF_INET6, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..05027b753721 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -452,8 +452,7 @@ ip6t_log_packet(u_int8_t pf,
in ? in->name : "",
out ? out->name : "");
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 79d43aa8fa8d..085727263812 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,6 +45,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@ static struct inet_frags nf_frags;
static struct netns_frags nf_init_frags;
#ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
.procname = "nf_conntrack_frag6_timeout",
.data = &nf_init_frags.timeout,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c39526ba5e..2bc6cd7bb8ec 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -123,18 +123,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, NULL);
synchronize_rcu();
@@ -192,10 +192,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
* policy is placed in rawv6_rcv() because it is
* required for each socket.
*/
- int (*filter)(struct sock *sock, struct sk_buff *skb);
+ mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
- filtered = filter ? filter(sk, skb) : 0;
+ filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
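The typedef plus the __rcu annotation make the Mobility Header filter hook self-describing: a registrant supplies one mh_filter_t and the reader side dereferences it under RCU, as the hunk shows. A minimal, hypothetical module fragment (every name below is invented); returning 0 matches the hook-less default in ipv6_raw_deliver(), i.e. the packet is not treated as filtered:

/* Hypothetical registrant; only rawv6_mh_filter_register()/unregister()
 * come from the code above, the rest is a sketch and assumes the usual
 * declarations from <net/rawv6.h>.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/rawv6.h>

static int sample_mh_filter(struct sock *sk, struct sk_buff *skb)
{
	return 0;	/* 0 == not filtered, same as having no hook */
}

static int __init sample_mh_init(void)
{
	return rawv6_mh_filter_register(sample_mh_filter);
}

static void __exit sample_mh_exit(void)
{
	rawv6_mh_filter_unregister(sample_mh_filter);
}

module_init(sample_mh_init);
module_exit(sample_mh_exit);
MODULE_LICENSE("GPL");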
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd0416f69..72609f1c6158 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
#define RT6_TRACE(x...) do { ; } while (0)
#endif
-#define CLONE_OFFLINK_ROUTE 0
-
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -99,6 +97,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
struct in6_addr *gwaddr, int ifindex);
#endif
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+
+ peer = rt->rt6i_peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -107,6 +135,7 @@ static struct dst_ops ip6_dst_ops_template = {
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
.default_mtu = ip6_default_mtu,
+ .cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
@@ -127,6 +156,10 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+ [RTAX_HOPLIMIT - 1] = 255,
+};
+
static struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
@@ -196,7 +229,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
if (peer) {
- BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
rt->rt6i_peer = NULL;
inet_putpeer(peer);
}
@@ -206,9 +238,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
- if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
- return;
-
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
@@ -738,13 +767,8 @@ restart:
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
- else {
-#if CLONE_OFFLINK_ROUTE
+ else
nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
- goto out2;
-#endif
- }
dst_release(&rt->dst);
rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2688,7 +2712,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_null_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+ ip6_template_metrics, true);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2699,7 +2724,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_prohibit_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+ ip6_template_metrics, true);
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2709,7 +2735,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_blk_hole_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
#endif
net->ipv6.sysctl.flush_delay = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..b1599a345c10 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
do {
- n = p->next;
+ n = rcu_dereference_protected(p->next, 1);
kfree(p);
p = n;
} while (p);
@@ -421,15 +421,17 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
- struct ip_tunnel_prl_entry *x, **p;
+ struct ip_tunnel_prl_entry *x;
+ struct ip_tunnel_prl_entry __rcu **p;
int err = 0;
ASSERT_RTNL();
if (a && a->addr != htonl(INADDR_ANY)) {
- for (p = &t->prl; *p; p = &(*p)->next) {
- if ((*p)->addr == a->addr) {
- x = *p;
+ for (p = &t->prl;
+ (x = rtnl_dereference(*p)) != NULL;
+ p = &x->next) {
+ if (x->addr == a->addr) {
*p = x->next;
call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
t->prl_count--;
@@ -438,9 +440,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
}
err = -ENXIO;
} else {
- if (t->prl) {
+ x = rtnl_dereference(t->prl);
+ if (x) {
t->prl_count = 0;
- x = t->prl;
call_rcu(&x->rcu_head, prl_list_destroy_rcu);
t->prl = NULL;
}
@@ -1179,7 +1181,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
- sitn->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
}
@@ -1196,11 +1198,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = sitn->tunnels[prio][h];
+ struct ip_tunnel *t;
+ t = rtnl_dereference(sitn->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9a009c66c8a3..a419a787eb69 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1299,7 +1299,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
return 0;
}
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023ea6e4..834dc02f1d4f 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+ atomic_inc(&rt->rt6i_peer->refcnt);
+
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,9 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
+ dst_destroy_metrics_generic(dst);
+ if (likely(xdst->u.rt6.rt6i_peer))
+ inet_putpeer(xdst->u.rt6.rt6i_peer);
xfrm_dst_destroy(xdst);
}
@@ -251,6 +258,7 @@ static struct dst_ops xfrm6_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
.local_out = __ip6_local_out,
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 9109262abd24..c766056d0488 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@ config MAC80211_HAS_RC
def_bool n
config MAC80211_RC_PID
- bool "PID controller based rate control algorithm" if EMBEDDED
+ bool "PID controller based rate control algorithm" if EXPERT
select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@ config MAC80211_RC_PID
rate.
config MAC80211_RC_MINSTREL
- bool "Minstrel" if EMBEDDED
+ bool "Minstrel" if EXPERT
select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
config MAC80211_RC_MINSTREL_HT
- bool "Minstrel 802.11n support" if EMBEDDED
+ bool "Minstrel 802.11n support" if EXPERT
depends on MAC80211_RC_MINSTREL
default y
---help---
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1534f2b44caf..82a6e0d80f05 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -85,6 +85,17 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_TIMESTAMP
+ bool 'Connection tracking timestamping'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timestamping.
+ This allows you to store the flow start time and to obtain
+ the flow stop time (once the flow has been destroyed) via
+ connection tracking events.
+
+ If unsure, say `N'.
+
config NF_CT_PROTO_DCCP
tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -185,9 +196,13 @@ config NF_CONNTRACK_IRC
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_BROADCAST
+ tristate
+
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@ config NF_CONNTRACK_NETBIOS_NS
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_SNMP
+ tristate "SNMP service protocol support"
+ depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
+ help
+ SNMP service requests are sent as broadcast messages from an
+ unprivileged port and responded to with unicast messages to the
+ same port. This makes them hard to firewall properly because connection
+ tracking doesn't deal with broadcasts. This helper tracks locally
+ originating SNMP service requests and the corresponding
+ responses. It relies on correct IP address configuration, specifically
+ netmask and broadcast address.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NF_CONNTRACK_PPTP
tristate "PPtP protocol support"
depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@ config NETFILTER_XT_CONNMARK
ctmark), similarly to the packet mark (nfmark). Using this
target and match, you can set and match on this mark.
+config NETFILTER_XT_SET
+ tristate 'set target and match support'
+ depends on IP_SET
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds the "SET" target and "set" match.
+
+ Using this target and match, you can add/delete and match
+ elements in the sets created by ipset(8).
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# alphabetically ordered list of targets
comment "Xtables targets"
+config NETFILTER_XT_TARGET_AUDIT
+ tristate "AUDIT target support"
+ depends on AUDIT
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an 'AUDIT' target, which can be used to create
+ audit records for packets dropped/accepted.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@ config NETFILTER_XT_TARGET_NFLOG
config NETFILTER_XT_TARGET_NFQUEUE
tristate '"NFQUEUE" target Support'
depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_QUEUE
help
This target replaced the old obsolete QUEUE target.
@@ -685,6 +738,15 @@ config NETFILTER_XT_MATCH_DCCP
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_DEVGROUP
+ tristate '"devgroup" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `devgroup' match, which allows matching on the
+ device group a network device is assigned to.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_DSCP
tristate '"dscp" and "tos" match support'
depends on NETFILTER_ADVANCED
@@ -886,7 +948,7 @@ config NETFILTER_XT_MATCH_RATEEST
config NETFILTER_XT_MATCH_REALM
tristate '"realm" match support'
depends on NETFILTER_ADVANCED
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
help
This option adds a `realm' match, which allows you to use the realm
key from the routing subsystem inside iptables.
@@ -1011,4 +1073,6 @@ endif # NETFILTER_XTABLES
endmenu
+source "net/netfilter/ipset/Kconfig"
+
source "net/netfilter/ipvs/Kconfig"
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 441050f31111..d57a890eaee5 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
# combos
obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
# targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,6 +77,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +107,8 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
# IPVS
obj-$(CONFIG_IP_VS) += ipvs/
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe290c04..1e00bf7d27c5 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -175,13 +175,21 @@ next_hook:
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
kfree_skb(skb);
- ret = -(verdict >> NF_VERDICT_BITS);
+ ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ECANCELED)
+ goto next_hook;
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
+ ret = 0;
}
rcu_read_unlock();
return ret;
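The reworked NF_DROP/NF_QUEUE handling in nf_hook_slow() above assumes a verdict word whose low bits carry the verdict code and whose upper 16 bits carry either a negative errno (retrieved with NF_DROP_GETERR) or a target queue number, plus a bypass flag consulted when queueing fails with -ESRCH (no userspace listener). The standalone sketch below spells out such a packing scheme; the constants and helper names are local stand-ins chosen to mirror the patch's usage, not copies of the kernel headers.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

#define NF_DROP   0
#define NF_QUEUE  3

#define VERDICT_MASK 0x000000ffu	/* low byte: verdict code */
#define QUEUE_BYPASS 0x00008000u	/* flag bit, position illustrative */
#define QBITS        16			/* payload lives in the upper 16 bits */

/* Pack an errno into a DROP verdict / a queue number into a QUEUE verdict. */
static unsigned int drop_err(int err)
{
	return ((unsigned int)(-err) << QBITS) | NF_DROP;
}

static unsigned int queue_nr(unsigned int q)
{
	return (q << QBITS) | NF_QUEUE;
}

static int drop_geterr(unsigned int v)
{
	return -(int)(v >> QBITS);
}

static unsigned int queue_getnr(unsigned int v)
{
	return v >> QBITS;
}

int main(void)
{
	unsigned int v1 = drop_err(-13);		/* pack errno 13 */
	unsigned int v2 = queue_nr(5) | QUEUE_BYPASS;	/* queue 5, bypass allowed */

	if ((v1 & VERDICT_MASK) == NF_DROP)
		printf("drop, err=%d\n", drop_geterr(v1));
	if ((v2 & VERDICT_MASK) == NF_QUEUE)
		printf("queue #%u, bypass=%s\n", queue_getnr(v2),
		       (v2 & QUEUE_BYPASS) ? "yes" : "no");
	return 0;
}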
@@ -214,7 +222,7 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
tracking in use: without this, connection may not be in hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -231,7 +239,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
}
EXPORT_SYMBOL(nf_ct_attach);
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644
index 000000000000..3b970d343023
--- /dev/null
+++ b/net/netfilter/ipset/Kconfig
@@ -0,0 +1,121 @@
+menuconfig IP_SET
+ tristate "IP set support"
+ depends on INET && NETFILTER
+ help
+ This option adds IP set support to the kernel.
+ In order to define and use the sets, you need the userspace utility
+ ipset(8). You can use the sets in netfilter via the "set" match
+ and "SET" target.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+ int "Maximum number of IP sets"
+ default 256
+ range 2 65534
+ depends on IP_SET
+ help
+ You can define here the default value of the maximum number
+ of IP sets for the kernel.
+
+ The value can be overridden by the 'max_sets' module
+ parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+ tristate "bitmap:ip set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip set type support, by which one
+ can store IPv4 addresses (or network addresses) from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+ tristate "bitmap:ip,mac set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip,mac set type support, by which one
+ can store IPv4 address and (source) MAC address pairs from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+ tristate "bitmap:port set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:port set type support, by which one
+ can store TCP/UDP port numbers from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IP
+ tristate "hash:ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip set type support, by which one
+ can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+ in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+ tristate "hash:ip,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port set type support, by which one
+ can store IPv4/IPv6 address and protocol/port pairs.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+ tristate "hash:ip,port,ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,ip set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ address triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+ tristate "hash:ip,port,net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,net set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ network address/prefix triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NET
+ tristate "hash:net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net set type support, by which
+ one can store IPv4/IPv6 network address/prefix elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+ tristate "hash:net,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net,port set type support, by which
+ one can store IPv4/IPv6 network address/prefix and
+ protocol/port pairs as elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_LIST_SET
+ tristate "list:set set support"
+ depends on IP_SET
+ help
+ This option adds the list:set set type support. In this
+ kind of set one can store the names of other sets, and it forms
+ an ordered union of the member sets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644
index 000000000000..5adbdab67bd2
--- /dev/null
+++ b/net/netfilter/ipset/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644
index 000000000000..bca96990218d
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 elements; /* number of max elements in the set */
+ u32 hosts; /* number of hosts in a subnet */
+ size_t memsize; /* members size */
+ u8 netmask; /* subnet netmask */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+ return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *adt, *nested;
+ u32 id, first = cb->args[2];
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip;
+
+ ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ ip = ip_to_id(map, ip);
+
+ return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 ip, ip_to, id;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = ip_to_id(map, ip);
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to) {
+ swap(ip, ip_to);
+ if (ip < map->first_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ if (ip_to > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; !before(ip_to, ip); ip += map->hosts) {
+ id = ip_to_id(map, ip);
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ if (map->netmask != 32)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ip *x = a->data;
+ const struct bitmap_ip *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->netmask == y->netmask &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_add,
+ [IPSET_DEL] = bitmap_ip_del,
+ [IPSET_TEST] = bitmap_ip_test,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_list,
+ .same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_tadd,
+ [IPSET_DEL] = bitmap_ip_tdel,
+ [IPSET_TEST] = bitmap_ip_ttest,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_tlist,
+ .same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ip *map = set->data;
+ unsigned long *table = map->members;
+ u32 id;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id < map->elements; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ip_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+ u32 first_ip, u32 last_ip,
+ u32 elements, u32 hosts, u8 netmask)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->elements = elements;
+ map->hosts = hosts;
+ map->netmask = netmask;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct bitmap_ip *map;
+ u32 first_ip, last_ip, hosts, elements;
+ u8 netmask = 32;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if (netmask > 32)
+ return -IPSET_ERR_INVALID_NETMASK;
+
+ first_ip &= ip_set_hostmask(netmask);
+ last_ip |= ~ip_set_hostmask(netmask);
+ }
+
+ if (netmask == 32) {
+ hosts = 1;
+ elements = last_ip - first_ip + 1;
+ } else {
+ u8 mask_bits;
+ u32 mask;
+
+ mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+ if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+ netmask <= mask_bits)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+ hosts = 2 << (32 - netmask - 1);
+ elements = 2 << (netmask - mask_bits - 1);
+ }
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = elements * sizeof(unsigned long);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tip;
+
+ bitmap_ip_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, elements - 1);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_ip;
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+ .name = "bitmap:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+ return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
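bitmap:ip maps every address of the configured [first_ip, last_ip] range to one slot, and ip_to_id() above turns an address into its slot index by masking off the host bits, subtracting first_ip and dividing by the per-slot subnet size (the create routine's `2 << (32 - netmask - 1)` is just `1 << (32 - netmask)`). The standalone sketch below reproduces that arithmetic so a range can be checked by hand; the example addresses and prefix length are made up.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <stdint.h>

/* Host-order mask for a prefix length: 0 for /0, ~0 for /32. */
static uint32_t hostmask(uint8_t cidr)
{
	return cidr ? ~((1u << (32 - cidr)) - 1) : 0;
}

/* Mirror of ip_to_id(): slot index of "ip" inside the bitmap. */
static uint32_t ip_to_id(uint32_t first_ip, uint8_t netmask, uint32_t hosts,
			 uint32_t ip)
{
	return ((ip & hostmask(netmask)) - first_ip) / hosts;
}

int main(void)
{
	/* Example set: 192.168.0.0-192.168.3.255 stored per /26 subnet. */
	uint32_t first_ip = (192u << 24) | (168u << 16);	/* 192.168.0.0 */
	uint8_t  netmask  = 26;
	uint32_t hosts    = 1u << (32 - netmask);		/* 64 addresses per slot */
	uint32_t ip       = first_ip | (2u << 8) | 130u;	/* 192.168.2.130 */

	printf("id = %u\n", ip_to_id(first_ip, netmask, hosts, ip));
	/* prints: id = 10 (the eleventh /26 block after 192.168.0.0) */
	return 0;
}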
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644
index 000000000000..5e790172deff
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+ MAC_EMPTY, /* element is not set */
+ MAC_FILLED, /* element is set with MAC */
+ MAC_UNSET, /* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collector */
+ size_t dsize; /* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+ u32 id; /* id in array */
+ unsigned char *ether; /* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+ unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+ return (void *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+ return elem->match == MAC_UNSET ||
+ (elem->match == MAC_FILLED &&
+ !ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0;
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ break;
+ case MAC_FILLED:
+ return -IPSET_ERR_EXIST;
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ }
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY)
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_elem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_EMPTY)
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return (data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0) &&
+ !bitmap_expired(map, data->id);
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address and activate the timer */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ if (timeout == map->timeout)
+ /* Timeout was not specified, get stored one */
+ timeout = elem->timeout;
+ elem->timeout = ip_set_timeout_set(timeout);
+ break;
+ case MAC_FILLED:
+ if (!bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+ /* Fall through */
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ /* If the MAC is not set yet, we store the plain timeout value
+ * because the timer is not activated yet;
+ * it can be reused later when the MAC is filled in,
+ * possibly by the kernel */
+ elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+ : timeout;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_telem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 timeout, last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (!bitmap_ipmac_exist(elem))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ timeout = elem->match == MAC_UNSET ? elem->timeout
+ : ip_set_timeout_get(elem->timeout);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ return -EMSGSIZE;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+
+ data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ /* Backward compatibility: we don't check the second flag */
+ if (skb_mac_header(skb) < skb->head ||
+ (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+ return -EINVAL;
+
+ data.id -= map->first_ip;
+ data.ether = eth_hdr(skb)->h_source;
+
+ return adtfn(set, &data, map->timeout);
+}
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+ u32 timeout = map->timeout;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+ if (ret)
+ return ret;
+
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_ETHER])
+ data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+ else
+ data.ether = NULL;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ data.id -= map->first_ip;
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ memset(map->members, 0,
+ (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map)
+ + (map->last_ip - map->first_ip + 1) * map->dsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ipmac *x = a->data;
+ const struct bitmap_ipmac *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_add,
+ [IPSET_DEL] = bitmap_ipmac_del,
+ [IPSET_TEST] = bitmap_ipmac_test,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_list,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_tadd,
+ [IPSET_DEL] = bitmap_ipmac_tdel,
+ [IPSET_TEST] = bitmap_ipmac_ttest,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_tlist,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ipmac *map = set->data;
+ struct ipmac_telem *elem;
+ u32 id, last = map->last_ip - map->first_ip;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++) {
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_FILLED &&
+ ip_set_timeout_expired(elem->timeout))
+ elem->match = MAC_EMPTY;
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ipmac_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+ u32 first_ip, u32 last_ip)
+{
+ map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ u32 first_ip, last_ip, elements;
+ struct bitmap_ipmac *map;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ elements = last_ip - first_ip + 1;
+
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->dsize = sizeof(struct ipmac_telem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = &bitmap_tipmac;
+
+ bitmap_ipmac_gc_init(set);
+ } else {
+ map->dsize = sizeof(struct ipmac_elem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ set->variant = &bitmap_ipmac;
+
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type = {
+ .name = "bitmap:ip,mac",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ipmac_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ETHER] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+ return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
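Each bitmap:ip,mac element is a small three-state machine (MAC_EMPTY, MAC_UNSET, MAC_FILLED): an address may be added without a MAC, and the kernel later fills the MAC in from the packet's Ethernet header. The standalone sketch below mirrors the non-timeout add/delete transitions of bitmap_ipmac_add()/_del() above; the error code is a placeholder, not the kernel's -IPSET_ERR_EXIST.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define ERR_EXIST 1

enum { MAC_EMPTY, MAC_FILLED, MAC_UNSET };

struct elem {
	unsigned char ether[ETH_ALEN];
	unsigned char match;
};

/* Add an entry; ether == NULL means "IP only, MAC to be learned later". */
static int elem_add(struct elem *e, const unsigned char *ether)
{
	switch (e->match) {
	case MAC_UNSET:
		if (!ether)
			return -ERR_EXIST;	/* already added without a MAC */
		memcpy(e->ether, ether, ETH_ALEN);
		e->match = MAC_FILLED;
		return 0;
	case MAC_FILLED:
		return -ERR_EXIST;
	default: /* MAC_EMPTY */
		if (ether) {
			memcpy(e->ether, ether, ETH_ALEN);
			e->match = MAC_FILLED;
		} else {
			e->match = MAC_UNSET;
		}
		return 0;
	}
}

static int elem_del(struct elem *e)
{
	if (e->match == MAC_EMPTY)
		return -ERR_EXIST;
	e->match = MAC_EMPTY;
	return 0;
}

int main(void)
{
	struct elem e = { .match = MAC_EMPTY };
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("add ip only : %d\n", elem_add(&e, NULL));	/* 0, now MAC_UNSET */
	printf("learn mac   : %d\n", elem_add(&e, mac));	/* 0, now MAC_FILLED */
	printf("add again   : %d\n", elem_add(&e, mac));	/* -1, already there */
	printf("delete      : %d\n", elem_del(&e));		/* 0 */
	return 0;
}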
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644
index 000000000000..165f09b1a9cb
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+ void *members; /* the set members */
+ u16 first_port; /* host byte order, included in range */
+ u16 last_port; /* host byte order, included in range */
+ size_t memsize; /* members size */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *atd, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *adt, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be16 __port;
+ u16 port = 0;
+
+ if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+ return -EINVAL;
+
+ port = ntohs(__port);
+
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ port -= map->first_port;
+
+ return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 port; /* wraparound */
+ u16 id, port_to;
+ int ret = 0;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = port - map->first_port;
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to) {
+ swap(port, port_to);
+ if (port < map->first_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else
+ port_to = port;
+
+ if (port_to > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; port <= port_to; port++) {
+ id = port - map->first_port;
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_port *x = a->data;
+ const struct bitmap_port *y = b->data;
+
+ return x->first_port == y->first_port &&
+ x->last_port == y->last_port &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_add,
+ [IPSET_DEL] = bitmap_port_del,
+ [IPSET_TEST] = bitmap_port_test,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_list,
+ .same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_tadd,
+ [IPSET_DEL] = bitmap_port_tdel,
+ [IPSET_TEST] = bitmap_port_ttest,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_tlist,
+ .same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_port *map = set->data;
+ unsigned long *table = map->members;
+ u32 id; /* wraparound */
+ u16 last = map->last_port - map->first_port;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_port_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+ u16 first_port, u16 last_port)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_port = first_port;
+ map->last_port = last_port;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_UNSPEC;
+
+ return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ struct bitmap_port *map;
+ u16 first_port, last_port;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (first_port > last_port) {
+ u16 tmp = first_port;
+
+ first_port = last_port;
+ last_port = tmp;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = (last_port - first_port + 1)
+ * sizeof(unsigned long);
+
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tport;
+
+ bitmap_port_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, last_port - first_port);
+ pr_debug("memsize: %zu\n", map->memsize);
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_port;
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_port_type = {
+ .name = "bitmap:port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = bitmap_port_create,
+ .create_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+ return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+ ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
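+
+/*
+ * Sizing sketch (illustrative only, derived from bitmap_port_create()
+ * above): a set created for the range 80-1023 covers 944 ports, so
+ *
+ *	without timeout: memsize = bitmap_bytes(0, 943), i.e. one bit per
+ *	                 port, assuming the usual round-up to whole
+ *	                 unsigned longs (120 bytes on a 64-bit machine);
+ *	with timeout:    memsize = 944 * sizeof(unsigned long), i.e. one
+ *	                 slot per port holding the expiry value
+ *	                 (7552 bytes on a 64-bit machine).
+ */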
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644
index 000000000000..8b1a54c1e400
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list); /* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list; /* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
+ */
+
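+/*
+ * Registration sketch (illustrative only; the "foo" names are
+ * hypothetical and the create_policy/adt_policy fields are omitted
+ * for brevity): a type module fills in a struct ip_set_type and
+ * registers it from its module init, along the lines of the bitmap:*
+ * and hash:* types added elsewhere in this patch:
+ *
+ *	static struct ip_set_type foo_type __read_mostly = {
+ *		.name		= "foo:ip",
+ *		.protocol	= IPSET_PROTOCOL,
+ *		.features	= IPSET_TYPE_IP,
+ *		.dimension	= IPSET_DIM_ONE,
+ *		.family		= AF_UNSPEC,
+ *		.revision	= 0,
+ *		.create		= foo_create,
+ *		.me		= THIS_MODULE,
+ *	};
+ *
+ *	ret = ip_set_type_register(&foo_type);		at module init
+ *	ip_set_type_unregister(&foo_type);		at module exit
+ */
+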
+static inline void
+ip_set_type_lock(void)
+{
+ mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+ mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister set types */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+ struct ip_set_type *type;
+
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC) &&
+ type->revision == revision)
+ return type;
+ return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+ nfnl_unlock();
+ pr_debug("try to load ip_set_%s\n", name);
+ if (request_module("ip_set_%s", name) < 0) {
+ pr_warning("Can't find ip_set type %s\n", name);
+ nfnl_lock();
+ return -IPSET_ERR_FIND_TYPE;
+ }
+ nfnl_lock();
+ return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+ struct ip_set_type **found)
+{
+ rcu_read_lock();
+ *found = find_set_type(name, family, revision);
+ if (*found) {
+ int err = !try_module_get((*found)->me);
+ rcu_read_unlock();
+ return err ? -EFAULT : 0;
+ }
+ rcu_read_unlock();
+
+ return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * If found, the supported minimum and maximum revisions are
+ * filled out.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+ struct ip_set_type *type;
+ bool found = false;
+
+ *min = *max = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC)) {
+ found = true;
+ if (type->revision < *min)
+ *min = type->revision;
+ else if (type->revision > *max)
+ *max = type->revision;
+ }
+ rcu_read_unlock();
+ if (found)
+ return 0;
+
+ return try_to_load_type(name);
+}
+
+#define family_name(f) ((f) == AF_INET ? "inet" : \
+ (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+ int ret = 0;
+
+ if (type->protocol != IPSET_PROTOCOL) {
+ pr_warning("ip_set type %s, family %s, revision %u uses "
+ "wrong protocol version %u (want %u)\n",
+ type->name, family_name(type->family),
+ type->revision, type->protocol, IPSET_PROTOCOL);
+ return -EINVAL;
+ }
+
+ ip_set_type_lock();
+ if (find_set_type(type->name, type->family, type->revision)) {
+ /* Duplicate! */
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "already registered!\n", type->name,
+ family_name(type->family), type->revision);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ list_add_rcu(&type->list, &ip_set_type_list);
+ pr_debug("type %s, family %s, revision %u registered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+ ip_set_type_lock();
+ if (!find_set_type(type->name, type->family, type->revision)) {
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "not registered\n", type->name,
+ family_name(type->family), type->revision);
+ goto unlock;
+ }
+ list_del_rcu(&type->list);
+ pr_debug("type %s, family %s, revision %u unregistered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+ void *members = NULL;
+
+ if (size < KMALLOC_MAX_SIZE)
+ members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ if (members) {
+ pr_debug("%p: allocated with kmalloc\n", members);
+ return members;
+ }
+
+ members = vzalloc(size);
+ if (!members)
+ return NULL;
+ pr_debug("%p: allocated with vmalloc\n", members);
+
+ return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+ pr_debug("%p: free with %s\n", members,
+ is_vmalloc_addr(members) ? "vfree" : "kfree");
+ if (is_vmalloc_addr(members))
+ vfree(members);
+ else
+ kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+ [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
+ [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+ return -IPSET_ERR_PROTOCOL;
+
+ *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+ return -IPSET_ERR_PROTOCOL;
+
+ memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+ sizeof(struct in6_addr));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from userspace
+ * only and serialized by the nfnl mutex indirectly from nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (set/SET netfilter modules).
+ *
+ * The set behind an index may change by swapping only, from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+ atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+ atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our feet.
+ */
+
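+/*
+ * Usage sketch (hypothetical caller such as the "set" match module
+ * referenced above; index, family, dim and flags come from the
+ * caller's own configuration). The index must have been obtained and
+ * referenced beforehand, e.g. via ip_set_nfnl_get(), so the set
+ * cannot go away here:
+ *
+ *	if (ip_set_test(index, skb, family, dim, flags))
+ *		... the packet matches the set ...
+ */
+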
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+ read_unlock_bh(&set->lock);
+
+ if (ret == -EAGAIN) {
+ /* Type requests element to be completed */
+ pr_debug("element must be competed, ADD is triggered\n");
+ write_lock_bh(&set->lock);
+ set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+ ret = 1;
+ }
+
+ /* Convert error codes to nomatch */
+ return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ struct ip_set *s;
+
+ for (i = 0; i < ip_set_max; i++) {
+ s = ip_set_list[i];
+ if (s != NULL && STREQ(s->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ *set = s;
+ }
+ }
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it does exist and
+ * can't be destroyed. Since it is referenced, it cannot be
+ * renamed either.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+ const struct ip_set *set = ip_set_list[index];
+
+ BUG_ON(set == NULL);
+ BUG_ON(atomic_read(&set->ref) == 0);
+
+ /* Referenced, so it's safe */
+ return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines to call by external subsystems, which do not
+ * call nfnl_lock for us.
+ */
+
+/*
+ * Find set by name, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken and released inside this function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+ struct ip_set *s;
+ ip_set_id_t index;
+
+ nfnl_lock();
+ index = ip_set_get_byname(name, &s);
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find set by index, reference it once. The reference makes sure the
+ * thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken and released inside this function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+ if (index > ip_set_max)
+ return IPSET_INVALID_ID;
+
+ nfnl_lock();
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IPSET_INVALID_ID;
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given set pointer points to a valid set, decrement
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex is taken and released inside this function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+ nfnl_lock();
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+ nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
+
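+/*
+ * Lifecycle sketch for such external users (illustrative only, error
+ * handling trimmed; "myset" is a placeholder name):
+ *
+ *	ip_set_id_t index;
+ *
+ *	index = ip_set_nfnl_get("myset");	at checkentry time
+ *	if (index == IPSET_INVALID_ID)
+ *		return -ENOENT;			the set must exist
+ *	...
+ *	ip_set_add(index, skb, family, dim, flags);	packet path
+ *	...
+ *	ip_set_nfnl_put(index);			at destroy time
+ */
+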
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already locked by nfnl_lock.
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+ return !tb[IPSET_ATTR_PROTOCOL] ||
+ nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+ enum ipset_cmd cmd)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+
+ nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+ sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ return NULL;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_INET;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ return nlh;
+}
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1},
+ [IPSET_ATTR_REVISION] = { .type = NLA_U8 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ const struct ip_set *set;
+
+ for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+ set = ip_set_list[i];
+ if (set != NULL && STREQ(set->name, name))
+ index = i;
+ }
+ return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+ ip_set_id_t index = find_set_id(name);
+
+ return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+ ip_set_id_t i;
+
+ *index = IPSET_INVALID_ID;
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL) {
+ if (*index == IPSET_INVALID_ID)
+ *index = i;
+ } else if (STREQ(name, ip_set_list[i]->name)) {
+ /* Name clash */
+ *set = ip_set_list[i];
+ return -EEXIST;
+ }
+ }
+ if (*index == IPSET_INVALID_ID)
+ /* No free slot remained */
+ return -IPSET_ERR_MAX_SETS;
+ return 0;
+}
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set, *clash;
+ ip_set_id_t index = IPSET_INVALID_ID;
+ struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+ const char *name, *typename;
+ u8 family, revision;
+ u32 flags = flag_exist(nlh);
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_REVISION] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA]))))
+ return -IPSET_ERR_PROTOCOL;
+
+ name = nla_data(attr[IPSET_ATTR_SETNAME]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+ pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+ name, typename, family_name(family), revision);
+
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
+ */
+ set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
+ rwlock_init(&set->lock);
+ strlcpy(set->name, name, IPSET_MAXNAMELEN);
+ atomic_set(&set->ref, 0);
+ set->family = family;
+
+ /*
+ * Next, check that we know the type, and take
+ * a reference on the type, to make sure it stays available
+ * while constructing our new set.
+ *
+ * After referencing the type, we try to create the type
+ * specific part of the set without holding any locks.
+ */
+ ret = find_set_type_get(typename, family, revision, &(set->type));
+ if (ret)
+ goto out;
+
+ /*
+ * Without holding any locks, create private part.
+ */
+ if (attr[IPSET_ATTR_DATA] &&
+ nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+ set->type->create_policy)) {
+ ret = -IPSET_ERR_PROTOCOL;
+ goto put_out;
+ }
+
+ ret = set->type->create(set, tb, flags);
+ if (ret != 0)
+ goto put_out;
+
+ /* BTW, ret==0 here. */
+
+ /*
+ * Here, we have a valid, constructed set and we are protected
+ * by nfnl_lock. Find the first free index in ip_set_list and
+ * check clashing.
+ */
+ if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+ /* If this is the same set and requested, ignore error */
+ if (ret == -EEXIST &&
+ (flags & IPSET_FLAG_EXIST) &&
+ STREQ(set->type->name, clash->type->name) &&
+ set->type->family == clash->type->family &&
+ set->type->revision == clash->type->revision &&
+ set->variant->same_set(set, clash))
+ ret = 0;
+ goto cleanup;
+ }
+
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
+ pr_debug("create: '%s' created with index %u!\n", set->name, index);
+ ip_set_list[index] = set;
+
+ return ret;
+
+cleanup:
+ set->variant->destroy(set);
+put_out:
+ module_put(set->type->me);
+out:
+ kfree(set);
+ return ret;
+}
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+ struct ip_set *set = ip_set_list[index];
+
+ pr_debug("set: %s\n", set->name);
+ ip_set_list[index] = NULL;
+
+ /* Must call it without holding any lock */
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ /* References are protected by the nfnl mutex */
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ (atomic_read(&ip_set_list[i]->ref)))
+ return -IPSET_ERR_BUSY;
+ }
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL)
+ ip_set_destroy_set(i);
+ }
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+ else if (atomic_read(&ip_set_list[i]->ref))
+ return -IPSET_ERR_BUSY;
+
+ ip_set_destroy_set(i);
+ }
+ return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+ pr_debug("set: %s\n", set->name);
+
+ write_lock_bh(&set->lock);
+ set->variant->flush(set);
+ write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+		return -IPSET_ERR_PROTOCOL;
+
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++)
+ if (ip_set_list[i] != NULL)
+ ip_set_flush_set(ip_set_list[i]);
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ ip_set_flush_set(ip_set_list[i]);
+ }
+
+ return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ const char *name2;
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+ if (atomic_read(&set->ref) != 0)
+ return -IPSET_ERR_REFERENCED;
+
+ name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ STREQ(ip_set_list[i]->name, name2))
+ return -IPSET_ERR_EXIST_SETNAME2;
+ }
+ strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+ return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
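+/*
+ * Worked example: if "foo" lives at index 0 and "bar" at index 1,
+ * then after swapping the two, index 0 still answers to the name
+ * "foo" (name and index stay bound together) but now holds bar's
+ * former contents, and any kernel reference taken on index 0 keeps
+ * working and simply sees the swapped contents.
+ */
+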
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *from, *to;
+ ip_set_id_t from_id, to_id;
+ char from_name[IPSET_MAXNAMELEN];
+ u32 from_ref;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (from_id == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+ if (to_id == IPSET_INVALID_ID)
+ return -IPSET_ERR_EXIST_SETNAME2;
+
+ from = ip_set_list[from_id];
+ to = ip_set_list[to_id];
+
+ /* Features must not change.
+ * Not an artificial restriction anymore, as we must prevent
+ * possible loops created by swapping in setlist type of sets. */
+ if (!(from->type->features == to->type->features &&
+ from->type->family == to->type->family))
+ return -IPSET_ERR_TYPE_MISMATCH;
+
+ /* No magic here: ref munging protected by the nfnl_lock */
+ strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
+
+ strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
+
+ ip_set_list[from_id] = to;
+ ip_set_list[to_id] = from;
+
+ return 0;
+}
+
+/* List/save set data */
+
+#define DUMP_INIT 0L
+#define DUMP_ALL 1L
+#define DUMP_ONE 2L
+#define DUMP_LAST 3L
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+ if (cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+ __ip_set_put((ip_set_id_t) cb->args[1]);
+ }
+ return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+ const struct nlattr *attr;
+ int rem;
+
+ pr_debug("dump nlmsg\n");
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+ pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+ }
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *attr = (void *)nlh + min_len;
+ ip_set_id_t index;
+
+ /* Second pass, so parser can't fail */
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+ /* cb->args[0] : dump single set/all sets
+ * [1] : set index
+ * [..]: type specific
+ */
+
+ if (!cda[IPSET_ATTR_SETNAME]) {
+ cb->args[0] = DUMP_ALL;
+ return 0;
+ }
+
+ index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ cb->args[0] = DUMP_ONE;
+ cb->args[1] = index;
+ return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ ip_set_id_t index = IPSET_INVALID_ID, max;
+ struct ip_set *set = NULL;
+ struct nlmsghdr *nlh = NULL;
+ unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+ int ret = 0;
+
+ if (cb->args[0] == DUMP_INIT) {
+ ret = dump_init(cb);
+ if (ret < 0) {
+ nlh = nlmsg_hdr(cb->skb);
+ /* We have to create and send the error message
+ * manually :-( */
+ if (nlh->nlmsg_flags & NLM_F_ACK)
+ netlink_ack(cb->skb, nlh, ret);
+ return ret;
+ }
+ }
+
+ if (cb->args[1] >= ip_set_max)
+ goto out;
+
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+ max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+ for (; cb->args[1] < max; cb->args[1]++) {
+ index = (ip_set_id_t) cb->args[1];
+ set = ip_set_list[index];
+ if (set == NULL) {
+ if (cb->args[0] == DUMP_ONE) {
+ ret = -ENOENT;
+ goto out;
+ }
+ continue;
+ }
+ /* When dumping all sets, we must dump "sorted"
+ * so that lists (unions of sets) are dumped last.
+ */
+ if (cb->args[0] != DUMP_ONE &&
+ !((cb->args[0] == DUMP_ALL) ^
+ (set->type->features & IPSET_DUMP_LAST)))
+ continue;
+ pr_debug("List set: %s\n", set->name);
+ if (!cb->args[2]) {
+ /* Start listing: make sure set won't be destroyed */
+ pr_debug("reference set\n");
+ __ip_set_get(index);
+ }
+ nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, flags,
+ IPSET_CMD_LIST);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto release_refcount;
+ }
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+ switch (cb->args[2]) {
+ case 0:
+ /* Core header data */
+ NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+ set->type->name);
+ NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+ set->family);
+ NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+ set->type->revision);
+ ret = set->variant->head(set, skb);
+ if (ret < 0)
+ goto release_refcount;
+ /* Fall through and add elements */
+ default:
+ read_lock_bh(&set->lock);
+ ret = set->variant->list(set, skb, cb);
+ read_unlock_bh(&set->lock);
+ if (!cb->args[2]) {
+ /* Set is done, proceed with next one */
+ if (cb->args[0] == DUMP_ONE)
+ cb->args[1] = IPSET_INVALID_ID;
+ else
+ cb->args[1]++;
+ }
+ goto release_refcount;
+ }
+ }
+ goto out;
+
+nla_put_failure:
+ ret = -EFAULT;
+release_refcount:
+ /* If there was an error or set is done, release set */
+ if (ret || !cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[index]->name);
+ __ip_set_put(index);
+ }
+
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+ cb->args[0] = DUMP_LAST;
+
+out:
+ if (nlh) {
+ nlmsg_end(skb, nlh);
+ pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+ dump_attrs(nlh);
+ }
+
+ return ret < 0 ? ret : skb->len;
+}
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ return netlink_dump_start(ctnl, skb, nlh,
+ ip_set_dump_start,
+ ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+ struct nlattr *tb[], enum ipset_adt adt,
+ u32 flags, bool use_lineno)
+{
+ int ret, retried = 0;
+ u32 lineno = 0;
+ bool eexist = flags & IPSET_FLAG_EXIST;
+
+ do {
+ write_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+ write_unlock_bh(&set->lock);
+ } while (ret == -EAGAIN &&
+ set->variant->resize &&
+ (ret = set->variant->resize(set, retried++)) == 0);
+
+ if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+ return 0;
+ if (lineno && use_lineno) {
+ /* Error in restore/batch mode: send back lineno */
+ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+ struct sk_buff *skb2;
+ struct nlmsgerr *errmsg;
+ size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *cmdattr;
+ u32 *errline;
+
+ skb2 = nlmsg_new(payload, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+ rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+ errmsg = nlmsg_data(rep);
+ errmsg->error = ret;
+ memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+ cmdattr = (void *)&errmsg->msg + min_len;
+
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ cmdattr, nlh->nlmsg_len - min_len,
+ ip_set_adt_policy);
+
+ errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+ *errline = lineno;
+
+ netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ /* Signal netlink not to send its ACK/errmsg. */
+ return -EINTR;
+ }
+
+ return ret;
+}
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+ memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+			memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_DATA] == NULL ||
+ !flag_nested(attr[IPSET_ATTR_DATA])))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+ read_unlock_bh(&set->lock);
+	/* Userspace can't trigger an element to be re-added */
+ if (ret == -EAGAIN)
+ ret = 1;
+
+ return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
+
+/* Get the header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ const struct ip_set *set;
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ ip_set_id_t index;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+ set = ip_set_list[index];
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_HEADER);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ u8 family, min, max;
+ const char *typename;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ ret = find_set_type_minmax(typename, family, &min, &max);
+ if (ret)
+ return ret;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_TYPE);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+ nlmsg_end(skb2, nlh2);
+
+ pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ int ret = 0;
+
+ if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_PROTOCOL);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+ [IPSET_CMD_CREATE] = {
+ .call = ip_set_create,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_create_policy,
+ },
+ [IPSET_CMD_DESTROY] = {
+ .call = ip_set_destroy,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_FLUSH] = {
+ .call = ip_set_flush,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_RENAME] = {
+ .call = ip_set_rename,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_SWAP] = {
+ .call = ip_set_swap,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_LIST] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_SAVE] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_ADD] = {
+ .call = ip_set_uadd,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_DEL] = {
+ .call = ip_set_udel,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_TEST] = {
+ .call = ip_set_utest,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_HEADER] = {
+ .call = ip_set_header,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_TYPE] = {
+ .call = ip_set_type,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_type_policy,
+ },
+ [IPSET_CMD_PROTOCOL] = {
+ .call = ip_set_protocol,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_protocol_policy,
+ },
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+ .name = "ip_set",
+ .subsys_id = NFNL_SUBSYS_IPSET,
+ .cb_count = IPSET_MSG_MAX,
+ .cb = ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+ unsigned *op;
+ void *data;
+ int copylen = *len, ret = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optval != SO_IP_SET)
+ return -EBADF;
+ if (*len < sizeof(unsigned))
+ return -EINVAL;
+
+ data = vmalloc(*len);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, user, *len) != 0) {
+ ret = -EFAULT;
+ goto done;
+ }
+ op = (unsigned *) data;
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IPSET_PROTOCOL) {
+ ret = -EPROTO;
+ goto done;
+ }
+ }
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
+ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ req_version->version = IPSET_PROTOCOL;
+ ret = copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version));
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+ nfnl_lock();
+ req_get->set.index = find_set_id(req_get->set.name);
+ nfnl_unlock();
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set) ||
+ req_get->set.index >= ip_set_max) {
+ ret = -EINVAL;
+ goto done;
+ }
+ nfnl_lock();
+ strncpy(req_get->set.name,
+ ip_set_list[req_get->set.index]
+ ? ip_set_list[req_get->set.index]->name : "",
+ IPSET_MAXNAMELEN);
+ nfnl_unlock();
+ goto copy;
+ }
+ default:
+ ret = -EBADMSG;
+ goto done;
+ } /* end of switch(op) */
+
+copy:
+ ret = copy_to_user(user, data, copylen);
+
+done:
+ vfree(data);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+ .pf = PF_INET,
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
+ .owner = THIS_MODULE,
+};
+
+static int __init
+ip_set_init(void)
+{
+ int ret;
+
+ if (max_sets)
+ ip_set_max = max_sets;
+ if (ip_set_max >= IPSET_INVALID_ID)
+ ip_set_max = IPSET_INVALID_ID - 1;
+
+ ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+ GFP_KERNEL);
+ if (!ip_set_list) {
+ pr_err("ip_set: Unable to create ip_set_list\n");
+ return -ENOMEM;
+ }
+
+ ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ if (ret != 0) {
+ pr_err("ip_set: cannot register with nfnetlink.\n");
+ kfree(ip_set_list);
+ return ret;
+ }
+ ret = nf_register_sockopt(&so_set);
+ if (ret != 0) {
+ pr_err("SO_SET registry failed: %d\n", ret);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ return ret;
+ }
+
+ pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+ return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+ /* There can't be any existing set */
+ nf_unregister_sockopt(&so_set);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
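+
+/*
+ * Sizing sketch: ip_set_list defaults to CONFIG_IP_SET_MAX slots and
+ * can be enlarged when loading the module (assuming it is built as a
+ * module named ip_set), e.g.:
+ *
+ *	modprobe ip_set max_sets=1024
+ *
+ * ip_set_init() clamps values at or above IPSET_INVALID_ID.
+ */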
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644
index 000000000000..8d5227212686
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+ bool src, __be16 *port, u8 *proto)
+{
+ switch (protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? th->source : th->dest;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr _udph;
+ const struct udphdr *uh;
+
+ uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? uh->source : uh->dest;
+ break;
+ }
+ case IPPROTO_ICMP: {
+ struct icmphdr _ich;
+ const struct icmphdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)htons((ic->type << 8) | ic->code);
+ break;
+ }
+ case IPPROTO_ICMPV6: {
+ struct icmp6hdr _ich;
+ const struct icmp6hdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)
+ htons((ic->icmp6_type << 8) | ic->icmp6_code);
+ break;
+ }
+ default:
+ break;
+ }
+ *proto = protocol;
+
+ return true;
+}
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ unsigned int protooff = ip_hdrlen(skb);
+ int protocol = iph->protocol;
+
+ /* See comments at tcp_match in ip_tables.c */
+ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+ return false;
+
+ return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ int protoff;
+ u8 nexthdr;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ if (protoff < 0)
+ return false;
+
+ return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+ bool ret;
+ u8 proto;
+
+ switch (pf) {
+ case AF_INET:
+ ret = ip_set_get_ip4_port(skb, src, port, &proto);
+ break;
+ case AF_INET6:
+ ret = ip_set_get_ip6_port(skb, src, port, &proto);
+ break;
+ default:
+ return false;
+ }
+ if (!ret)
+ return ret;
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
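+
+/*
+ * Usage sketch (illustrative): a port-aware set type extracts the port
+ * in its kadt function roughly like this, where "src" selects source
+ * versus destination port:
+ *
+ *	__be16 port;
+ *
+ *	if (!ip_set_get_ip_port(skb, pf, src, &port))
+ *		return -EINVAL;		non-TCP/UDP or truncated header
+ *
+ * For ICMP and ICMPv6, get_port() above folds type and code into the
+ * 16-bit value (echo request, type 8 code 0, becomes htons(0x0800)),
+ * but ip_set_get_ip_port() itself reports success only for TCP and UDP.
+ */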
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644
index 000000000000..43bcce200129
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set hash_ip_same_set
+#define hash_ip6_same_set hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+ __be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+ __be32 ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+ const struct hash_ip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+ return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+ dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+ elem->ip = 0;
+}
+
+static inline bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ const struct hash_ip4_telem *tdata =
+ (const struct hash_ip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETMASK
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be32 ip;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+ ip &= ip_set_netmask(h->netmask);
+ if (ip == 0)
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip, ip_to, hosts, timeout = h->timeout;
+ __be32 nip;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip &= ip_set_hostmask(h->netmask);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ return adtfn(set, &nip, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
+ for (; !before(ip_to, ip); ip += hosts) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ ret = adtfn(set, &nip, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
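+/*
+ * Worked example for the range loop above: with a set created with
+ * netmask 24, hosts = 2 << (32 - 24 - 1) = 256, so adding the range
+ * 192.0.2.0-192.0.3.255 steps through it /24 by /24 and stores just
+ * two elements, 192.0.2.0 and 192.0.3.0.
+ */
+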
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout &&
+ x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+ union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+ union nf_inet_addr ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+ const struct hash_ip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+ return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+ ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ const struct hash_ip6_telem *e =
+ (const struct hash_ip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -IPSET_ERR_HASH_ELEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &ip, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 netmask, hbits;
+ struct ip_set_hash *h;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+ netmask = set->family == AF_INET ? 32 : 128;
+ pr_debug("Create set %s with family %s\n",
+ set->name, set->family == AF_INET ? "inet" : "inet6");
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if ((set->family == AF_INET && netmask > 32) ||
+ (set->family == AF_INET6 && netmask > 128) ||
+ netmask == 0)
+ return -IPSET_ERR_INVALID_NETMASK;
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ h->netmask = netmask;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ip4_gc_init(set);
+ else
+ hash_ip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_variant : &hash_ip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+ .name = "hash:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+ return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+ ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
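The hash:ip variants above (hash_ip4_*, hash_ip6_*) are not written out by hand: defining TYPE, PF and HOST_MASK and then including linux/netfilter/ipset/ip_set_ahash.h stamps out the add/del/test, resize and garbage-collection code from the small data_* helpers in this file. A minimal userspace sketch of the token-pasting idea, assuming an illustrative type_pf() helper rather than the real header:

#include <stdio.h>

#define CONCAT3(a, b, c)	a##b##c
#define TOKEN(a, b, c)		CONCAT3(a, b, c)

#define TYPE	hash_ip
#define PF	4
/* hypothetical helper mimicking the header's name generation */
#define type_pf(f)		TOKEN(TYPE, PF, f)

static int type_pf(_add)(void)		/* expands to hash_ip4_add() */
{
	return 0;
}

int main(void)
{
	printf("hash_ip4_add() -> %d\n", hash_ip4_add());
	return 0;
}

Including the header twice, once with PF 4 and once with PF 6, is what yields both the IPv4 and the IPv6 variants from a single template.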
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644
index 000000000000..adbe787ea5dc
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set hash_ipport_same_set
+#define hash_ipport6_same_set hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+ const struct hash_ipport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+ const struct hash_ipport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ const struct hash_ipport4_telem *tdata =
+ (const struct hash_ipport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+ const struct hash_ipport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+ const struct hash_ipport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ const struct hash_ipport6_telem *e =
+ (const struct hash_ipport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipport4_gc_init(set);
+ else
+ hash_ipport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_variant : &hash_ipport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+ .name = "hash:ip,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+ return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+ ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
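hash_ipport4_uadt() above expands a single userspace add carrying IP_TO or CIDR plus PORT_TO into individual (ip, port) elements: both bounds are normalised with swap() and the whole rectangle is walked in host byte order. A standalone sketch of that expansion, simplified to a plain <= comparison where the kernel uses the wrap-safe before() helper, with add_one() standing in for the real adtfn() callback:

#include <stdint.h>
#include <stdio.h>

static void swap_u32(uint32_t *a, uint32_t *b)
{
	uint32_t t = *a; *a = *b; *b = t;
}

/* stand-in for the real adtfn() callback */
static void add_one(unsigned int ip, unsigned int port)
{
	printf("add %u.%u.%u.%u,%u\n", ip >> 24, (ip >> 16) & 0xff,
	       (ip >> 8) & 0xff, ip & 0xff, port);
}

int main(void)
{
	uint32_t ip = 0x0a000002, ip_to = 0x0a000001;	/* 10.0.0.2 .. 10.0.0.1 */
	uint32_t port = 81, port_to = 80;

	if (ip > ip_to)
		swap_u32(&ip, &ip_to);
	if (port > port_to)
		swap_u32(&port, &port_to);

	for (; ip <= ip_to; ip++)
		for (uint32_t p = port; p <= port_to; p++)
			add_one(ip, p);
	return 0;
}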
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644
index 000000000000..22e23abb86c6
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set hash_ipportip_same_set
+#define hash_ipportip6_same_set hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+ const struct hash_ipportip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+ const struct hash_ipportip4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ const struct hash_ipportip4_telem *tdata =
+ (const struct hash_ipportip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipportip6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+ const struct hash_ipportip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+ const struct hash_ipportip6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ const struct hash_ipportip6_telem *e =
+ (const struct hash_ipportip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &e->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportip4_gc_init(set);
+ else
+ hash_ipportip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_variant : &hash_ipportip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+ .name = "hash:ip,port,ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+ return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
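Throughout these modules the *_data_isnull() and *_data_zero_out() helpers encode an empty hash slot by zeroing the proto field, which is also why an add with proto 0 is rejected with IPSET_ERR_INVALID_PROTO. A small sketch of that convention; the struct and scan below are illustrative only, not the kernel's bucket layout:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct elem {
	unsigned int ip, ip2;
	unsigned short port;
	unsigned char proto;
};

static bool slot_is_free(const struct elem *e)
{
	return e->proto == 0;		/* mirrors hash_ipportip4_data_isnull() */
}

int main(void)
{
	struct elem bucket[4];

	memset(bucket, 0, sizeof(bucket));
	bucket[1] = (struct elem){ .ip = 1, .ip2 = 2, .port = 80, .proto = 6 };

	for (int i = 0; i < 4; i++)
		printf("slot %d: %s\n", i,
		       slot_is_free(&bucket[i]) ? "free" : "used");
	return 0;
}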
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644
index 000000000000..6033e8b54bbd
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set hash_ipportnet_same_set
+#define hash_ipportnet6_same_set hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+ const struct hash_ipportnet4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+ const struct hash_ipportnet4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+ elem->ip2 &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ const struct hash_ipportnet4_telem *tdata =
+ (const struct hash_ipportnet4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, tdata->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+ const struct hash_ipportnet6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+ const struct hash_ipportnet6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip2, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ const struct hash_ipportnet6_telem *e =
+ (const struct hash_ipportnet6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &e->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, e->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+ ip6_netmask(&data.ip2, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip2, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_tvariant
+ : &hash_ipportnet6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportnet4_gc_init(set);
+ else
+ hash_ipportnet6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+ .name = "hash:ip,port,net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportnet_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+ return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
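Masking data.ip2 with ip_set_netmask(data.cidr) keeps only the network part of the second address, so every host of a stored net maps onto the same hash element. The kernel reads the mask from the precomputed pfxlen tables; the sketch below derives an IPv4 mask arithmetically, in host byte order, purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t netmask_host(uint8_t cidr)
{
	/* guard the cidr == 0 case: a 32-bit shift would be undefined */
	return cidr ? ~(uint32_t)0 << (32 - cidr) : 0;
}

int main(void)
{
	uint32_t ip = 0xc0a80a81;			/* 192.168.10.129 */
	uint8_t cidr = 24;
	uint32_t net = ip & netmask_host(cidr);		/* 192.168.10.0 */

	printf("%08x/%d -> %08x\n", (unsigned)ip, cidr, (unsigned)net);
	return 0;
}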
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644
index 000000000000..c4db202b7da4
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set hash_net_same_set
+#define hash_net6_same_set hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+ const struct hash_net4_elem *ip2)
+{
+ return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+ const struct hash_net4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ const struct hash_net4_telem *tdata =
+ (const struct hash_net4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net4_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net4_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+struct hash_net6_telem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+ const struct hash_net6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+ const struct hash_net6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ const struct hash_net6_telem *e =
+ (const struct hash_net6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net6_elem data =
+		{ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net6_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ struct ip_set_hash *h;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_net4_tvariant : &hash_net6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_net4_gc_init(set);
+ else
+ hash_net6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_net4_variant : &hash_net6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+ .name = "hash:net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_net_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+ return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+ ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644
index 000000000000..34a165626ee9
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set hash_netport_same_set
+#define hash_netport6_same_set hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+ const struct hash_netport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+ const struct hash_netport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ const struct hash_netport4_telem *tdata =
+ (const struct hash_netport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
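+/* kadt is the kernel-side add/del/test entry point: it is fed with the
+ * address, port and protocol extracted from a live sk_buff by the set
+ * match / SET target. uadt below is the userspace-facing counterpart,
+ * parsing the netlink attributes sent by the ipset utility. */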
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
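+
+/* Note on the range handling above: when a TCP/UDP port range is given
+ * (with the userspace tool, something along the lines of
+ * "ipset add foo 192.168.0.0/24,tcp:80-82"), every port in the range is
+ * stored as a separate hash element, so the loop simply re-runs the add
+ * for each port value. */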
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+struct hash_netport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+ const struct hash_netport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+ const struct hash_netport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ const struct hash_netport6_telem *e =
+ (const struct hash_netport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_netport4_gc_init(set);
+ else
+ hash_netport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_variant : &hash_netport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+ .name = "hash:net,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_netport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+ return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+ ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644
index 000000000000..a47c32982f06
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+ ip_set_id_t id;
+};
+
+struct set_telem {
+ ip_set_id_t id;
+ unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+ size_t dsize; /* element size */
+ u32 size; /* size of set list array */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collection */
+ struct set_elem members[0]; /* the set members */
+};
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+ return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
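+
+/* The members[] area is a flat blob of map->size slots, each map->dsize
+ * bytes wide (sizeof(struct set_elem) or sizeof(struct set_telem),
+ * depending on whether the set was created with timeout support), so
+ * element i lives at byte offset i * map->dsize. Casting the returned
+ * pointer to set_telem is only valid for sets created with timeouts. */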
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+ return elem->id != IPSET_INVALID_ID &&
+ !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+ int ret;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ return 0;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ switch (adt) {
+ case IPSET_TEST:
+ ret = ip_set_test(elem->id, skb, pf, dim, flags);
+ if (ret > 0)
+ return ret;
+ break;
+ case IPSET_ADD:
+ ret = ip_set_add(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ case IPSET_DEL:
+ ret = ip_set_del(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+ }
+ return -EINVAL;
+}
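+
+/* Note on list_set_kadt() above: the member sets are tried strictly in
+ * list order. A TEST reports the first member set matching the packet,
+ * while ADD and DEL stop at the first member set for which the
+ * operation succeeds. An IPSET_INVALID_ID entry marks the end of the
+ * list. */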
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+ const struct set_elem *elem;
+
+ if (i + 1 < map->size) {
+ elem = list_set_elem(map, i + 1);
+ return !!(elem->id == id &&
+ !(with_timeout(map->timeout) &&
+ list_set_expired(map, i + 1)));
+ }
+
+ return 0;
+}
+
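+/* Insert id at position i by rippling the tail of the array one slot
+ * forward: swap() stores the new id and picks up the previous occupant,
+ * which is carried into the next slot, and so on until the
+ * IPSET_INVALID_ID marker of the first free slot has been propagated,
+ * which ends the shift. list_elem_tadd() does the same while carrying
+ * the per-entry timeouts along as well. */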
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+ struct set_elem *e;
+
+ for (; i < map->size; i++) {
+ e = list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ }
+}
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ struct set_telem *e;
+
+ for (; i < map->size; i++) {
+ e = (struct set_telem *)list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ swap(e->timeout, timeout);
+ }
+}
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ const struct set_elem *e = list_set_elem(map, i);
+
+ if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+ /* Last element replaced: e.g. add new,before,last */
+ ip_set_put_byindex(e->id);
+ if (with_timeout(map->timeout))
+ list_elem_tadd(map, i, id, timeout);
+ else
+ list_elem_add(map, i, id);
+
+ return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+ struct set_elem *a = list_set_elem(map, i), *b;
+
+ ip_set_put_byindex(id);
+
+ for (; i < map->size - 1; i++) {
+ b = list_set_elem(map, i + 1);
+ a->id = b->id;
+ if (with_timeout(map->timeout))
+ ((struct set_telem *)a)->timeout =
+ ((struct set_telem *)b)->timeout;
+ a = b;
+ if (a->id == IPSET_INVALID_ID)
+ break;
+ }
+ /* Last element */
+ a->id = IPSET_INVALID_ID;
+ return 0;
+}
+
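+/* In list_set_uadt() below, "before" encodes the optional reference to
+ * another member set: > 0 means the operation is relative to the entry
+ * before the set named by IPSET_ATTR_NAMEREF, < 0 means after it
+ * (NAMEREF given without the BEFORE flag), and 0 means no reference at
+ * all. */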
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct list_set *map = set->data;
+ bool with_timeout = with_timeout(map->timeout);
+ int before = 0;
+ u32 timeout = map->timeout;
+ ip_set_id_t id, refid = IPSET_INVALID_ID;
+ const struct set_elem *elem;
+ struct ip_set *s;
+ u32 i;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_NAME] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+ if (id == IPSET_INVALID_ID)
+ return -IPSET_ERR_NAME;
+ /* "Loop detection" */
+ if (s->type->features & IPSET_TYPE_NAME) {
+ ret = -IPSET_ERR_LOOP;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ before = f & IPSET_FLAG_BEFORE;
+ }
+
+ if (before && !tb[IPSET_ATTR_NAMEREF]) {
+ ret = -IPSET_ERR_BEFORE;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_NAMEREF]) {
+ refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+ &s);
+ if (refid == IPSET_INVALID_ID) {
+ ret = -IPSET_ERR_NAMEREF;
+ goto finish;
+ }
+ if (!before)
+ before = -1;
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout) {
+ ret = -IPSET_ERR_TIMEOUT;
+ goto finish;
+ }
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ switch (adt) {
+ case IPSET_TEST:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID ||
+ (before != 0 && i + 1 >= map->size))
+ break;
+ else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (before > 0 && elem->id == id)
+ ret = next_id_eq(map, i, refid);
+ else if (before < 0 && elem->id == refid)
+ ret = next_id_eq(map, i, id);
+ else if (before == 0 && elem->id == id)
+ ret = 1;
+ }
+ break;
+ case IPSET_ADD:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == id &&
+ !(with_timeout && list_set_expired(map, i)))
+ ret = -IPSET_ERR_EXIST;
+ }
+ if (ret == -IPSET_ERR_EXIST)
+ break;
+ ret = -IPSET_ERR_LIST_FULL;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : list_set_add(map, i, id, timeout);
+ else if (elem->id != refid)
+ continue;
+ else if (with_timeout && list_set_expired(map, i))
+ ret = -IPSET_ERR_REF_EXIST;
+ else if (before)
+ ret = list_set_add(map, i, id, timeout);
+ else if (i + 1 < map->size)
+ ret = list_set_add(map, i + 1, id, timeout);
+ }
+ break;
+ case IPSET_DEL:
+ ret = -IPSET_ERR_EXIST;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID) {
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : -IPSET_ERR_EXIST;
+ break;
+ } else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (elem->id == id &&
+ (before == 0 ||
+ (before > 0 &&
+ next_id_eq(map, i, refid))))
+ ret = list_set_del(map, id, i);
+ else if (before < 0 &&
+ elem->id == refid &&
+ next_id_eq(map, i, id))
+ ret = list_set_del(map, id, i + 1);
+ }
+ break;
+ default:
+ break;
+ }
+
+finish:
+ if (refid != IPSET_INVALID_ID)
+ ip_set_put_byindex(refid);
+ if (adt != IPSET_ADD || ret)
+ ip_set_put_byindex(id);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+list_set_flush(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id != IPSET_INVALID_ID) {
+ ip_set_put_byindex(elem->id);
+ elem->id = IPSET_INVALID_ID;
+ }
+ }
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+ list_set_flush(set);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->size * map->dsize));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 i, first = cb->args[2];
+ const struct set_elem *e;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->size; cb->args[2]++) {
+ i = cb->args[2];
+ e = list_set_elem(map, i);
+ if (e->id == IPSET_INVALID_ID)
+ goto finish;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (i == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+ ip_set_name_byindex(e->id));
+ if (with_timeout(map->timeout)) {
+ const struct set_telem *te =
+ (const struct set_telem *) e;
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(te->timeout)));
+ }
+ ipset_nest_end(skb, nested);
+ }
+finish:
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(i == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct list_set *x = a->data;
+ const struct list_set *y = b->data;
+
+ return x->size == y->size &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+ .kadt = list_set_kadt,
+ .uadt = list_set_uadt,
+ .destroy = list_set_destroy,
+ .flush = list_set_flush,
+ .head = list_set_head,
+ .list = list_set_list,
+ .same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct list_set *map = set->data;
+ struct set_telem *e;
+ u32 i;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ /* i is unsigned, so "i >= 0" would always be true and wrap around;
+ * count down with an off-by-one index instead. */
+ for (i = map->size; i > 0; i--) {
+ e = (struct set_telem *) list_set_elem(map, i - 1);
+ if (e->id != IPSET_INVALID_ID &&
+ list_set_expired(map, i - 1))
+ list_set_del(map, e->id, i - 1);
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = list_set_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+ unsigned long timeout)
+{
+ struct list_set *map;
+ struct set_elem *e;
+ u32 i;
+
+ map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+ if (!map)
+ return false;
+
+ map->size = size;
+ map->dsize = dsize;
+ map->timeout = timeout;
+ set->data = map;
+
+ for (i = 0; i < size; i++) {
+ e = list_set_elem(map, i);
+ e->id = IPSET_INVALID_ID;
+ }
+
+ return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_SIZE])
+ size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+ if (size < IP_SET_LIST_MIN_SIZE)
+ size = IP_SET_LIST_MIN_SIZE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!init_list_set(set, size, sizeof(struct set_telem),
+ ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+ return -ENOMEM;
+
+ list_set_gc_init(set);
+ } else {
+ if (!init_list_set(set, size, sizeof(struct set_elem),
+ IPSET_NO_TIMEOUT))
+ return -ENOMEM;
+ }
+ set->variant = &list_set;
+ return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+ .name = "list:set",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = list_set_create,
+ .create_policy = {
+ [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_NAME] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+ return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+ ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644
index 000000000000..23f8c8162214
--- /dev/null
+++ b/net/netfilter/ipset/pfxlen.c
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+ {.ip6 = { \
+ __constant_htonl(a), __constant_htonl(b), \
+ __constant_htonl(c), __constant_htonl(d), \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_netmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
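+
+/* The ip_set_netmask()/ip_set_netmask6() helpers used by the hash types
+ * are expected to resolve to lookups in this table, i.e. masking an
+ * IPv4 address to a /24 is roughly "addr &= ip_set_netmask_map[24].ip";
+ * the exact wrappers live in pfxlen.h. */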
+
+#undef E
+#define E(a, b, c, d) \
+ {.ip6 = { (__force __be32) a, (__force __be32) b, \
+ (__force __be32) c, (__force __be32) d, \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index a475edee0912..5c48ffb60c28 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -43,11 +43,6 @@ EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
/*
* Get an ip_vs_app object
*/
@@ -67,7 +62,8 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
* Allocate/initialize app incarnation and register it in proto apps.
*/
static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
struct ip_vs_protocol *pp;
struct ip_vs_app *inc;
@@ -98,7 +94,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
}
}
- ret = pp->register_app(inc);
+ ret = pp->register_app(net, inc);
if (ret)
goto out;
@@ -119,7 +115,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
* Release app incarnation
*/
static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_protocol *pp;
@@ -127,7 +123,7 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
return;
if (pp->unregister_app)
- pp->unregister_app(inc);
+ pp->unregister_app(net, inc);
IP_VS_DBG(9, "%s App %s:%u unregistered\n",
pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc)
* Register an application incarnation in protocol applications
*/
int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
int result;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- result = ip_vs_app_inc_new(app, proto, port);
+ result = ip_vs_app_inc_new(net, app, proto, port);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return result;
}
@@ -185,16 +183,17 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
/*
* ip_vs_app registration routine
*/
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- list_add(&app->a_list, &ip_vs_app_list);
+ list_add(&app->a_list, &ipvs->app_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return 0;
}
@@ -204,19 +203,20 @@ int register_ip_vs_app(struct ip_vs_app *app)
* ip_vs_app unregistration routine
* We are sure there are no app incarnations attached to services
*/
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_app *inc, *nxt;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
- ip_vs_app_inc_release(inc);
+ ip_vs_app_inc_release(net, inc);
}
list_del(&app->a_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -226,7 +226,8 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
/*
* Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
*/
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp)
{
return pp->app_conn_bind(cp);
}
@@ -481,11 +482,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
* /proc/net/ip_vs_app entry function
*/
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
struct ip_vs_app *app, *inc;
- list_for_each_entry(app, &ip_vs_app_list, a_list) {
+ list_for_each_entry(app, &ipvs->app_list, a_list) {
list_for_each_entry(inc, &app->incs_list, a_list) {
if (pos-- == 0)
return inc;
@@ -497,19 +498,24 @@ static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
- mutex_lock(&__ip_vs_app_mutex);
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
- return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+ mutex_lock(&ipvs->app_mutex);
+
+ return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_app *inc, *app;
struct list_head *e;
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
++*pos;
if (v == SEQ_START_TOKEN)
- return ip_vs_app_idx(0);
+ return ip_vs_app_idx(ipvs, 0);
inc = v;
app = inc->app;
@@ -518,7 +524,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return list_entry(e, struct ip_vs_app, a_list);
/* go on to next application */
- for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+ for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
app = list_entry(e, struct ip_vs_app, a_list);
list_for_each_entry(inc, &app->incs_list, a_list) {
return inc;
@@ -529,7 +535,9 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
- mutex_unlock(&__ip_vs_app_mutex);
+ struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+ mutex_unlock(&ipvs->app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@ static const struct seq_operations ip_vs_app_seq_ops = {
static int ip_vs_app_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_app_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+ sizeof(struct seq_net_private));
}
static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@ static const struct file_operations ip_vs_app_fops = {
};
#endif
-int __init ip_vs_app_init(void)
+static int __net_init __ip_vs_app_init(struct net *net)
{
- /* we will replace it with proc_net_ipvs_create() soon */
- proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->app_list);
+ __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+ proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
return 0;
}
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+ proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+ .init = __ip_vs_app_init,
+ .exit = __ip_vs_app_cleanup,
+};
+
+int __init ip_vs_app_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_app_ops);
+ return rv;
+}
+
void ip_vs_app_cleanup(void)
{
- proc_net_remove(&init_net, "ip_vs_app");
+ unregister_pernet_subsys(&ip_vs_app_ops);
}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e9adecdc8ca4..83233fe24a08 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -48,35 +48,32 @@
/*
* Connection hash size. Default is what was selected at compile time.
*/
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
/* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
-static struct list_head *ip_vs_conn_tab;
+static struct list_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
-/* counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
-#define CT_LOCKARRAY_BITS 4
+#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
@@ -133,19 +130,19 @@ static inline void ct_write_unlock_bh(unsigned key)
/*
* Returns hash value for IPVS connection entry
*/
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
- (__force u32)port, proto, ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+ (__force u32)port, proto, ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
- return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
- ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+ ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
port = p->vport;
}
- return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+ return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
- NULL, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+ &cp->caddr, cp->cport, NULL, 0, &p);
- if (cp->dest && cp->dest->svc->pe) {
- p.pe = cp->dest->svc->pe;
+ if (cp->pe) {
+ p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
@@ -186,7 +183,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
}
/*
- * Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ * Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -269,11 +266,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
- p->cport == cp->cport && p->vport == cp->vport &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
@@ -313,23 +311,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
+ struct net *net = skb_net(skb);
pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!inverse))
- ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
- &iph->daddr, pptr[1], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+ pptr[0], &iph->daddr, pptr[1], p);
else
- ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
- &iph->saddr, pptr[0], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+ pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -353,8 +351,10 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
ct_read_lock(hash);
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ if (!ip_vs_conn_net_eq(cp, p->net))
+ continue;
if (p->pe_data && p->pe->ct_match) {
- if (p->pe->ct_match(p, cp))
+ if (p->pe == cp->pe && p->pe->ct_match(p, cp))
goto out;
continue;
}
@@ -404,10 +404,11 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
- p->vport == cp->cport && p->cport == cp->dport &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ret = cp;
@@ -428,7 +429,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -611,9 +611,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
struct ip_vs_dest *dest;
if ((cp) && (!cp->dest)) {
- dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
- &cp->vaddr, cp->vport,
- cp->protocol);
+ dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+ cp->dport, &cp->vaddr, cp->vport,
+ cp->protocol, cp->fwmark);
ip_vs_bind_dest(cp, dest);
return dest;
} else
@@ -686,13 +686,14 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- (sysctl_ip_vs_expire_quiescent_template &&
+ (ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
@@ -730,6 +731,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
cp->timeout = 60*HZ;
@@ -765,13 +767,14 @@ static void ip_vs_conn_expire(unsigned long data)
if (cp->flags & IP_VS_CONN_F_NFCT)
ip_vs_conn_drop_conntrack(cp);
+ ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
- atomic_dec(&ip_vs_conn_count);
+ atomic_dec(&ipvs->conn_count);
kmem_cache_free(ip_vs_conn_cachep, cp);
return;
@@ -802,10 +805,12 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
- struct ip_vs_dest *dest)
+ struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+ struct netns_ipvs *ipvs = net_ipvs(p->net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+ p->protocol);
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
@@ -815,6 +820,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
INIT_LIST_HEAD(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+ ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
cp->protocol = p->protocol;
ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +832,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
&cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
- if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+ cp->fwmark = fwmark;
+ if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+ ip_vs_pe_get(p->pe);
+ cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
}
@@ -842,7 +851,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
- atomic_inc(&ip_vs_conn_count);
+ atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
@@ -861,8 +870,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
#endif
ip_vs_bind_xmit(cp);
- if (unlikely(pp && atomic_read(&pp->appcnt)))
- ip_vs_bind_app(cp, pp);
+ if (unlikely(pd && atomic_read(&pd->appcnt)))
+ ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +880,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* IP_VS_CONN_F_ONE_PACKET too.
*/
- if (ip_vs_conntrack_enabled())
+ if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
@@ -884,17 +893,22 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+ struct seq_net_private p;
+ struct list_head *l;
+};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
+ struct ip_vs_iter_state *iter = seq->private;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
- seq->private = &ip_vs_conn_tab[idx];
+ iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
@@ -906,14 +920,17 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
- seq->private = NULL;
+ struct ip_vs_iter_state *iter = seq->private;
+
+ iter->l = NULL;
return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
- struct list_head *e, *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct list_head *e, *l = iter->l;
int idx;
++*pos;
@@ -930,18 +947,19 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
- seq->private = &ip_vs_conn_tab[idx];
+ iter->l = &ip_vs_conn_tab[idx];
return cp;
}
ct_read_unlock_bh(idx);
}
- seq->private = NULL;
+ iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
{
- struct list_head *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct list_head *l = iter->l;
if (l)
ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +973,19 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
- if (cp->dest && cp->pe_data &&
- cp->dest->svc->pe->show_pe_data) {
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
+ if (cp->pe_data) {
pe_data[0] = ' ';
- len = strlen(cp->dest->svc->pe->name);
- memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+ len = strlen(cp->pe->name);
+ memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
- len += cp->dest->svc->pe->show_pe_data(cp,
- pe_data + len);
+ len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
@@ -1004,7 +1023,8 @@ static const struct seq_operations ip_vs_conn_seq_ops = {
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
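
Switching ip_vs_conn_open() from seq_open() to seq_open_net() is what scopes the /proc view per namespace: the iterator's private area now starts with a struct seq_net_private, so seq_file_net() can recover the struct net of the reader and ->show() can skip foreign entries. A self-contained sketch of that pattern, with placeholder names and a single dummy record:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

/* Private iterator: seq_net_private must stay first for seq_file_net(). */
struct my_iter_state {
	struct seq_net_private p;
	struct list_head *bucket;	/* extra cursor state, as in the patch */
};

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos == 0 ? SEQ_START_TOKEN : NULL;	/* single record */
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);	/* namespace of this reader */

	/* real code would skip entries where !net_eq(entry_net, net) */
	seq_printf(seq, "namespace-local view (net=%p)\n", net);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = my_seq_next,
	.stop  = my_seq_stop,
	.show  = my_seq_show,
};

static int my_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &my_seq_ops,
			    sizeof(struct my_iter_state));
}

static const struct file_operations my_fops = {
	.owner	 = THIS_MODULE,
	.open	 = my_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,	/* pairs with seq_open_net() */
};
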
static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1051,10 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
+
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
@@ -1067,7 +1091,8 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_sync_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1138,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
}
/* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
@@ -1133,7 +1158,8 @@ void ip_vs_random_dropentry(void)
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
-
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
@@ -1168,12 +1194,13 @@ void ip_vs_random_dropentry(void)
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- flush_again:
+flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
/*
* Lock is actually needed in this loop.
@@ -1181,7 +1208,8 @@ static void ip_vs_conn_flush(void)
ct_write_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
@@ -1194,16 +1222,41 @@ static void ip_vs_conn_flush(void)
/* the counter may not be zero, because some conn entries may still be
   run by the slow timer handler, or be unhashed but still referenced */
- if (atomic_read(&ip_vs_conn_count) != 0) {
+ if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ atomic_set(&ipvs->conn_count, 0);
+
+ proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+ proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ return 0;
+}
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+ /* flush all the connection entries first */
+ ip_vs_conn_flush(net);
+ proc_net_remove(net, "ip_vs_conn");
+ proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+ .init = __ip_vs_conn_init,
+ .exit = __ip_vs_conn_cleanup,
+};
int __init ip_vs_conn_init(void)
{
int idx;
+ int retc;
/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1241,24 +1294,18 @@ int __init ip_vs_conn_init(void)
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
- proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
- proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ retc = register_pernet_subsys(&ipvs_conn_ops);
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
- return 0;
+ return retc;
}
-
void ip_vs_conn_cleanup(void)
{
- /* flush all the connection entries first */
- ip_vs_conn_flush();
-
+ unregister_pernet_subsys(&ipvs_conn_ops);
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
- proc_net_remove(&init_net, "ip_vs_conn");
- proc_net_remove(&init_net, "ip_vs_conn_sync");
vfree(ip_vs_conn_tab);
}
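
With the pernet_operations above, the /proc registration and the final connection flush run once per namespace instead of once per module load. A stripped-down sketch of that conversion (placeholder names; the real __ip_vs_conn_init() also resets the per-net connection counter, as shown above):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

/* e.g. the seq_file operations from the sketch further up */
extern const struct file_operations my_fops;

static int __net_init my_conn_init(struct net *net)
{
	if (!proc_net_fops_create(net, "my_conn", 0, &my_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit my_conn_cleanup(struct net *net)
{
	/* flush whatever this namespace still owns before the file goes */
	proc_net_remove(net, "my_conn");
}

static struct pernet_operations my_conn_ops = {
	.init = my_conn_init,
	.exit = my_conn_cleanup,
};

/* Module init/exit shrink to registering and unregistering the ops. */
static int __init my_init(void)
{
	return register_pernet_subsys(&my_conn_ops);
}

static void __exit my_exit(void)
{
	unregister_pernet_subsys(&my_conn_ops);
}
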
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b4e51e9c5a04..4d06617fab6c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -41,6 +41,7 @@
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h>
#include <net/ip6_checksum.h>
+#include <net/netns/generic.h> /* net_generic() */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@ EXPORT_SYMBOL(ip_vs_conn_put);
EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
/* ID used in ICMP lookups */
#define icmp_id(icmph) (((icmph)->un).echo.id)
@@ -108,21 +115,28 @@ static inline void
ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.inpkts++;
- dest->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.inpkts++;
- dest->svc->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.inpkts++;
- ip_vs_stats.ustats.inbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
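
The spinlock-protected global, per-service and per-destination counters become per-CPU counters: each CPU bumps its own slot, and the u64_stats_sync seqcount only exists so 32-bit readers can fetch the 64-bit byte counters without tearing. A reduced sketch of both the write and the read side (made-up names; the writer is assumed to run in softirq context, as the IPVS hooks do):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct my_cpu_stats {
	u64			inbytes;
	u32			inpkts;
	struct u64_stats_sync	syncp;
};

static struct my_cpu_stats __percpu *stats;	/* alloc_percpu() at init time */

/* Hot path: touch only this CPU's counters, no shared lock or cache line. */
static void account_packet(unsigned int len)
{
	struct my_cpu_stats *s = this_cpu_ptr(stats);

	s->inpkts++;
	u64_stats_update_begin(&s->syncp);
	s->inbytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Slow path: fold all CPUs into one total, retrying torn 64-bit reads. */
static u64 total_inbytes(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct my_cpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 bytes;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			bytes = s->inbytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		sum += bytes;
	}
	return sum;
}
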
@@ -131,21 +145,28 @@ static inline void
ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.outpkts++;
- dest->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.outpkts++;
- dest->svc->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.outpkts++;
- ip_vs_stats.ustats.outbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
@@ -153,41 +174,44 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
static inline void
ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
{
- spin_lock(&cp->dest->stats.lock);
- cp->dest->stats.ustats.conns++;
- spin_unlock(&cp->dest->stats.lock);
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(cp->dest->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&svc->stats.lock);
- svc->stats.ustats.conns++;
- spin_unlock(&svc->stats.lock);
+ s = this_cpu_ptr(svc->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.conns++;
- spin_unlock(&ip_vs_stats.lock);
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.conns++;
}
static inline int
ip_vs_set_state(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- if (unlikely(!pp->state_transition))
+ if (unlikely(!pd->pp->state_transition))
return 0;
- return pp->state_transition(cp, direction, skb, pp);
+ return pd->pp->state_transition(cp, direction, skb, pd);
}
-static inline void
+static inline int
ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
struct sk_buff *skb, int protocol,
const union nf_inet_addr *caddr, __be16 cport,
const union nf_inet_addr *vaddr, __be16 vport,
struct ip_vs_conn_param *p)
{
- ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+ ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+ vport, p);
p->pe = svc->pe;
if (p->pe && p->pe->fill_param)
- p->pe->fill_param(p, skb);
+ return p->pe->fill_param(p, skb);
+
+ return 0;
}
/*
@@ -200,7 +224,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
struct sk_buff *skb,
- __be16 ports[2])
+ __be16 src_port, __be16 dst_port, int *ignored)
{
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
"mnet %s\n",
- IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
- IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+ IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+ IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
IP_VS_DBG_ADDR(svc->af, &snet));
/*
@@ -247,14 +271,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
__be16 vport = 0;
- if (ports[1] == svc->port) {
+ if (dst_port == svc->port) {
/* non-FTP template:
* <protocol, caddr, 0, vaddr, vport, daddr, dport>
* FTP template:
* <protocol, caddr, 0, vaddr, 0, daddr, 0>
*/
if (svc->port != FTPPORT)
- vport = ports[1];
+ vport = dst_port;
} else {
/* Note: persistent fwmark-based services and
* persistent port zero service are handled here.
@@ -268,24 +292,31 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
vaddr = &fwmark;
}
}
- ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
- vaddr, vport, &param);
+ /* return *ignored = -1 so NF_DROP can be used */
+ if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+ vaddr, vport, &param) < 0) {
+ *ignored = -1;
+ return NULL;
+ }
}
/* Check if a template already exists */
ct = ip_vs_ct_in_get(&param);
if (!ct || !ip_vs_check_template(ct)) {
- /* No template found or the dest of the connection
+ /*
+ * No template found or the dest of the connection
* template is not available.
+ * return *ignored=0 i.e. ICMP and NF_DROP
*/
dest = svc->scheduler->schedule(svc, skb);
if (!dest) {
IP_VS_DBG(1, "p-schedule: no dest found.\n");
kfree(param.pe_data);
+ *ignored = 0;
return NULL;
}
- if (ports[1] == svc->port && svc->port != FTPPORT)
+ if (dst_port == svc->port && svc->port != FTPPORT)
dport = dest->port;
/* Create a template
@@ -293,9 +324,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* and thus param.pe_data will be destroyed
* when the template expires */
ct = ip_vs_conn_new(&param, &dest->addr, dport,
- IP_VS_CONN_F_TEMPLATE, dest);
+ IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
if (ct == NULL) {
kfree(param.pe_data);
+ *ignored = -1;
return NULL;
}
@@ -306,7 +338,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
kfree(param.pe_data);
}
- dport = ports[1];
+ dport = dst_port;
if (dport == svc->port && dest->port)
dport = dest->port;
@@ -317,11 +349,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
/*
* Create a new connection according to the template
*/
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
- &iph.daddr, ports[1], &param);
- cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+ src_port, &iph.daddr, dst_port, &param);
+
+ cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
if (cp == NULL) {
ip_vs_conn_put(ct);
+ *ignored = -1;
return NULL;
}
@@ -341,11 +375,27 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* It selects a server according to the virtual service, and
* creates a connection entry.
* Protocols supported: TCP, UDP
+ *
+ * Usage of *ignored
+ *
+ * 1 : protocol tried to schedule (e.g. on SYN), found svc but the
+ * svc/scheduler decides that this packet should be accepted with
+ * NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 : scheduler cannot find a destination, so try bypass or
+ * return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 : scheduler tried to schedule but a fatal error occurred, e.g.
+ * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
+ * failure such as a missing Call-ID, ENOMEM on skb_linearize
+ * or pe_data. In this case we should return NF_DROP without
+ * any attempt to send ICMP with ip_vs_leave.
*/
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored)
+ struct ip_vs_proto_data *pd, int *ignored)
{
+ struct ip_vs_protocol *pp = pd->pp;
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_dest *dest;
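
The three *ignored values documented above are meant to be consumed by the protocol's conn_schedule handler, which alone decides whether ip_vs_leave() may be tried. A hedged sketch of that caller-side logic, illustrative rather than the literal tcp/udp handler (it also omits the svc reference counting a real handler performs):

#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/ip_vs.h>

/*
 * Returns 0 when *verdict is final, 1 to continue normal processing.
 * Names are illustrative only.
 */
static int example_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
			    struct ip_vs_proto_data *pd,
			    int *verdict, struct ip_vs_conn **cpp)
{
	int ignored;

	*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
	if (*cpp || ignored == 1)
		return 1;		/* scheduled, or accept unscheduled */

	if (ignored == 0)		/* no dest: bypass or ICMP + NF_DROP */
		*verdict = ip_vs_leave(svc, skb, pd);
	else				/* -1: fatal error, drop silently */
		*verdict = NF_DROP;
	return 0;
}
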
@@ -371,12 +421,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
}
/*
- * Do not schedule replies from local real server. It is risky
- * for fwmark services but mostly for persistent services.
+ * Do not schedule replies from local real server.
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
- (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
- (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+ (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
"Not scheduling reply for existing connection");
__ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
/*
* Persistent service
*/
- if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
- *ignored = 0;
- return ip_vs_sched_persist(svc, skb, pptr);
- }
+ if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+ return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+ *ignored = 0;
/*
* Non-persistent service
@@ -402,8 +450,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
return NULL;
}
- *ignored = 0;
-
dest = svc->scheduler->schedule(svc, skb);
if (dest == NULL) {
IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
- pptr[0], &iph.daddr, pptr[1], &p);
+
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+ &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+ &p);
cp = ip_vs_conn_new(&p, &dest->addr,
dest->port ? dest->port : pptr[1],
- flags, dest);
- if (!cp)
+ flags, dest, skb->mark);
+ if (!cp) {
+ *ignored = -1;
return NULL;
+ }
}
IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,14 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
* no destination is available for a new connection.
*/
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
+ struct net *net;
+ struct netns_ipvs *ipvs;
__be16 _ports[2], *pptr;
struct ip_vs_iphdr iph;
int unicast;
+
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -459,18 +512,20 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_service_put(svc);
return NF_DROP;
}
+ net = skb_net(skb);
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
else
#endif
- unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+ unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
/* if it is fwmark-based service, the cache_bypass sysctl is up
and the destination is a non-local unicast, then create
a cache_bypass connection entry */
- if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+ ipvs = net_ipvs(net);
+ if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
int ret, cs;
struct ip_vs_conn *cp;
unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +539,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol,
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
&iph.saddr, pptr[0],
&iph.daddr, pptr[1], &p);
cp = ip_vs_conn_new(&p, &daddr, 0,
IP_VS_CONN_F_BYPASS | flags,
- NULL);
+ NULL, skb->mark);
if (!cp)
return NF_DROP;
}
@@ -498,10 +553,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_in_stats(cp, skb);
/* set state */
- cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
/* transmit the first SYN packet */
- ret = cp->packet_xmit(skb, cp, pp);
+ ret = cp->packet_xmit(skb, cp, pd->pp);
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
@@ -682,6 +737,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
struct ip_vs_protocol *pp,
unsigned int offset, unsigned int ihl)
{
+ struct netns_ipvs *ipvs;
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -703,6 +759,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
if (!skb_make_writable(skb, offset))
goto out;
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -712,11 +770,11 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto out;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto out;
@@ -808,7 +866,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -885,7 +943,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -924,9 +982,12 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
* Used for NAT and local client.
*/
static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
struct ip_vs_conn *cp, int ihl)
{
+ struct ip_vs_protocol *pp = pd->pp;
+ struct netns_ipvs *ipvs;
+
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
if (!skb_make_writable(skb, ihl))
@@ -961,13 +1022,15 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* if it came from this machine itself. So re-compute
* the routing information.
*/
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto drop;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto drop;
@@ -975,7 +1038,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
ip_vs_out_stats(cp, skb);
- ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb->ipvs_property = 1;
if (!(cp->flags & IP_VS_CONN_F_NFCT))
ip_vs_notrack(skb);
@@ -999,9 +1062,12 @@ drop:
static unsigned int
ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs;
EnterFunction(11);
@@ -1022,6 +1088,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
+ net = skb_net(skb);
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1045,9 +1112,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
+ pp = pd->pp;
/* reassemble IP fragments */
#ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1141,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+ ipvs = net_ipvs(net);
if (likely(cp))
- return handle_response(af, skb, pp, cp, iph.len);
- if (sysctl_ip_vs_nat_icmp_send &&
+ return handle_response(af, skb, pd, cp, iph.len);
+ if (ipvs->sysctl_nat_icmp_send &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1156,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
sizeof(_ports), _ports);
if (pptr == NULL)
return NF_ACCEPT; /* Not for me */
- if (ip_vs_lookup_real_service(af, iph.protocol,
+ if (ip_vs_lookup_real_service(net, af, iph.protocol,
&iph.saddr,
pptr[0])) {
/*
@@ -1202,12 +1271,14 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
static int
ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct iphdr *iph;
struct icmphdr _icmph, *ic;
struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, ihl, verdict;
union nf_inet_addr snet;
@@ -1249,9 +1320,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->protocol);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->protocol);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,10 +1338,10 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
if (!cp) {
/* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
if (cp) {
snet.ip = iph->saddr;
return handle_response_icmp(AF_INET, skb, &snet,
@@ -1312,6 +1385,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
static int
ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct ipv6hdr *iph;
struct icmp6hdr _icmph, *ic;
struct ipv6hdr _ciph, *cih; /* The ip header contained
@@ -1319,6 +1393,7 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, verdict;
union nf_inet_addr snet;
struct rt6_info *rt;
@@ -1361,9 +1436,11 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->nexthdr);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->nexthdr);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
/* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,10 +1454,10 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
if (!cp) {
/* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
if (cp) {
ipv6_addr_copy(&snet.in6, &iph->saddr);
return handle_response_icmp(AF_INET6, skb, &snet,
@@ -1423,10 +1500,13 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
static unsigned int
ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, restart, pkts;
+ struct netns_ipvs *ipvs;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@@ -1480,20 +1560,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
+ net = skb_net(skb);
/* Protocol supported? */
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
-
+ pp = pd->pp;
/*
* Check if the packet belongs to an existing connection entry
*/
- cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
if (unlikely(!cp)) {
int v;
- if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+ if (!pp->conn_schedule(af, skb, pd, &v, &cp))
return v;
}
@@ -1505,12 +1586,13 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+ net = skb_net(skb);
+ ipvs = net_ipvs(net);
/* Check the server status */
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
- if (sysctl_ip_vs_expire_nodest_conn) {
+ if (ipvs->sysctl_expire_nodest_conn) {
/* try to expire the connection immediately */
ip_vs_conn_expire_now(cp);
}
@@ -1521,7 +1603,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
ip_vs_in_stats(cp, skb);
- restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
if (cp->packet_xmit)
ret = cp->packet_xmit(skb, cp, pp);
/* do not touch skb anymore */
@@ -1535,35 +1617,41 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*
* Sync connection if it is about to close to
 * encourage the standby servers to update the connections' timeout
+ *
+ * For ONE_PKT let ip_vs_sync_conn() do the filter work.
*/
- pkts = atomic_add_return(1, &cp->in_pkts);
- if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = ipvs->sysctl_sync_threshold[0];
+ else
+ pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
goto out;
}
}
/* Keep this block last: TCP and others with pp->num_states <= 1 */
- else if (af == AF_INET &&
- (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+ else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
(cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
out:
cp->old_state = cp->state;
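
With the default sync_threshold of {3, 50} (removed as a global further down in ip_vs_ctl.c and now per-net), the modulo test above fires on an established connection's 3rd packet and then every 50th packet after that (3, 53, 103, and so on), in addition to the listed state transitions. The predicate in isolation, as a sketch rather than the exact helper the code uses:

/*
 * True when the in-packet counter says this connection should be synced
 * to the backup: threshold[0] is the offset, threshold[1] the period,
 * i.e. packets 3, 53, 103, ... with the default {3, 50}.
 */
static inline int sync_due(int pkts, const int threshold[2])
{
	return pkts % threshold[1] == threshold[0];
}
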
@@ -1782,7 +1870,39 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
},
#endif
};
+/*
+ * Initialize IP Virtual Server netns mem.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+ struct netns_ipvs *ipvs;
+
+ ipvs = net_generic(net, ip_vs_net_id);
+ if (ipvs == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->net = net;
+ /* Counters used for creating unique names */
+ ipvs->gen = atomic_read(&ipvs_netns_cnt);
+ atomic_inc(&ipvs_netns_cnt);
+ net->ipvs = ipvs;
+ printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+ sizeof(struct netns_ipvs), ipvs->gen);
+ return 0;
+}
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+ IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+ .init = __ip_vs_init,
+ .exit = __ip_vs_cleanup,
+ .id = &ip_vs_net_id,
+ .size = sizeof(struct netns_ipvs),
+};
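
Because ipvs_core_ops sets both .id and .size, the pernet core allocates a zeroed struct netns_ipvs for each namespace before calling __ip_vs_init(), which only fills it in and caches the pointer in net->ipvs for the net_ipvs() accessor used throughout the series. A minimal sketch of the allocate-by-size, fetch-by-id pattern (placeholder names):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int my_net_id __read_mostly;	/* assigned by register_pernet_subsys() */

struct my_pernet_state {
	int gen;			/* arbitrary per-namespace data */
};

/*
 * With .id and .size set in pernet_operations, the core hands every
 * namespace a zeroed my_pernet_state; ->init() only has to fill it in,
 * and any code holding a struct net can fetch it back like this.
 */
static inline struct my_pernet_state *my_state(struct net *net)
{
	return net_generic(net, my_net_id);
}
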
/*
* Initialize IP Virtual Server
@@ -1791,8 +1911,11 @@ static int __init ip_vs_init(void)
{
int ret;
- ip_vs_estimator_init();
+ ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
+ if (ret < 0)
+ return ret;
+ ip_vs_estimator_init();
ret = ip_vs_control_init();
if (ret < 0) {
pr_err("can't setup control.\n");
@@ -1813,15 +1936,23 @@ static int __init ip_vs_init(void)
goto cleanup_app;
}
+ ret = ip_vs_sync_init();
+ if (ret < 0) {
+ pr_err("can't setup sync data.\n");
+ goto cleanup_conn;
+ }
+
ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0) {
pr_err("can't register hooks.\n");
- goto cleanup_conn;
+ goto cleanup_sync;
}
pr_info("ipvs loaded.\n");
return ret;
+cleanup_sync:
+ ip_vs_sync_cleanup();
cleanup_conn:
ip_vs_conn_cleanup();
cleanup_app:
@@ -1831,17 +1962,20 @@ static int __init ip_vs_init(void)
ip_vs_control_cleanup();
cleanup_estimator:
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
return ret;
}
static void __exit ip_vs_cleanup(void)
{
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ip_vs_sync_cleanup();
ip_vs_conn_cleanup();
ip_vs_app_cleanup();
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
pr_info("ipvs unloaded.\n");
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..c73b0c831a2d 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -38,6 +38,7 @@
#include <linux/mutex.h>
#include <net/net_namespace.h>
+#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
@@ -57,42 +58,7 @@ static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
/* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
@@ -105,7 +71,8 @@ int ip_vs_get_debug_level(void)
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+ const struct in6_addr *addr)
{
struct rt6_info *rt;
struct flowi fl = {
@@ -114,7 +81,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
};
- rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+ rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
return 1;
@@ -125,7 +92,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@ static void update_defense_level(void)
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
- nomem = (availmem < sysctl_ip_vs_amemthresh);
+ nomem = (availmem < ipvs->sysctl_amemthresh);
local_bh_disable();
/* drop_entry */
- spin_lock(&__ip_vs_dropentry_lock);
- switch (sysctl_ip_vs_drop_entry) {
+ spin_lock(&ipvs->dropentry_lock);
+ switch (ipvs->sysctl_drop_entry) {
case 0:
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
- sysctl_ip_vs_drop_entry = 2;
+ atomic_set(&ipvs->dropentry, 1);
+ ipvs->sysctl_drop_entry = 2;
} else {
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
} else {
- atomic_set(&ip_vs_dropentry, 0);
- sysctl_ip_vs_drop_entry = 1;
+ atomic_set(&ipvs->dropentry, 0);
+ ipvs->sysctl_drop_entry = 1;
};
break;
case 3:
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
break;
}
- spin_unlock(&__ip_vs_dropentry_lock);
+ spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
- spin_lock(&__ip_vs_droppacket_lock);
- switch (sysctl_ip_vs_drop_packet) {
+ spin_lock(&ipvs->droppacket_lock);
+ switch (ipvs->sysctl_drop_packet) {
case 0:
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
- sysctl_ip_vs_drop_packet = 2;
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
+ ipvs->sysctl_drop_packet = 2;
} else {
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
} else {
- ip_vs_drop_rate = 0;
- sysctl_ip_vs_drop_packet = 1;
+ ipvs->drop_rate = 0;
+ ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
- ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+ ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
- spin_unlock(&__ip_vs_droppacket_lock);
+ spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
- spin_lock(&ip_vs_securetcp_lock);
- switch (sysctl_ip_vs_secure_tcp) {
+ spin_lock(&ipvs->securetcp_lock);
+ switch (ipvs->sysctl_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
to_change = 0;
@@ -216,7 +183,7 @@ static void update_defense_level(void)
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
- sysctl_ip_vs_secure_tcp = 2;
+ ipvs->sysctl_secure_tcp = 2;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
@@ -229,7 +196,7 @@ static void update_defense_level(void)
} else {
if (old_secure_tcp >= 2)
to_change = 0;
- sysctl_ip_vs_secure_tcp = 1;
+ ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
@@ -237,10 +204,11 @@ static void update_defense_level(void)
to_change = 1;
break;
}
- old_secure_tcp = sysctl_ip_vs_secure_tcp;
+ old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
- ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
- spin_unlock(&ip_vs_securetcp_lock);
+ ip_vs_protocol_timeout_change(ipvs,
+ ipvs->sysctl_secure_tcp > 1);
+ spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
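
The drop_packet defense derives its rate from how far available memory has fallen below amemthresh: with the default amemthresh of 1024 and, say, 1000 units still available, the rate is 1024 / (1024 - 1000) = 42, i.e. roughly one packet in 42 is dropped, and the ratio tightens toward dropping everything as memory keeps shrinking. The arithmetic in isolation (a sketch; the real code stores the result in ipvs->drop_rate and ipvs->drop_counter as above):

/*
 * 1/rate packet-drop defense: the further availmem falls below the
 * threshold, the smaller (hence more aggressive) the rate becomes.
 * A return value of 0 means "drop nothing".
 */
static int drop_rate_for(int amemthresh, int availmem)
{
	if (availmem >= amemthresh)
		return 0;				/* no memory pressure */
	return amemthresh / (amemthresh - availmem);	/* e.g. 1024 / 24 = 42 */
}
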
@@ -250,16 +218,16 @@ static void update_defense_level(void)
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
static void defense_work_handler(struct work_struct *work)
{
- update_defense_level();
- if (atomic_read(&ip_vs_dropentry))
- ip_vs_random_dropentry();
+ struct netns_ipvs *ipvs =
+ container_of(work, struct netns_ipvs, defense_work.work);
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+ update_defense_level(ipvs);
+ if (atomic_read(&ipvs->dropentry))
+ ip_vs_random_dropentry(ipvs->net);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
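
Turning defense_work from a file-scope DECLARE_DELAYED_WORK into a field of struct netns_ipvs gives each namespace its own one-second defense timer, and the handler recovers its owning state from the work pointer via container_of(), exactly as above. The generic shape of that pattern (placeholder names):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_netns_state {
	struct delayed_work defense_work;	/* was a file-scope DECLARE_DELAYED_WORK */
	int defense_level;
};

static void my_defense_handler(struct work_struct *work)
{
	/* 'work' is &state->defense_work.work; step back to the container */
	struct my_netns_state *state =
		container_of(work, struct my_netns_state, defense_work.work);

	state->defense_level++;				/* touch per-net data only */
	schedule_delayed_work(&state->defense_work, HZ);	/* re-arm for this net */
}

/* Called once per namespace, e.g. from a pernet ->init() handler. */
static void my_defense_start(struct my_netns_state *state)
{
	INIT_DELAYED_WORK(&state->defense_work, my_defense_handler);
	schedule_delayed_work(&state->defense_work, HZ);
}
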
int
@@ -287,33 +255,13 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
-/*
- * Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- * Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- * FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
/*
* Returns hash value for virtual service
*/
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
- __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+ const union nf_inet_addr *addr, __be16 port)
{
register unsigned porth = ntohs(port);
__be32 addr_fold = addr->ip;
@@ -323,6 +271,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
+ addr_fold ^= ((size_t)net>>8);
return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
& IP_VS_SVC_TAB_MASK;
@@ -331,13 +280,13 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
/*
* Returns hash value of fwmark for virtual service lookup
*/
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
- return fwmark & IP_VS_SVC_TAB_MASK;
+ return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
- * Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ * Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
@@ -353,16 +302,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
if (svc->fwmark == 0) {
/*
- * Hash it by <protocol,addr,port> in ip_vs_svc_table
+ * Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
- hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
- svc->port);
+ hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+ &svc->addr, svc->port);
list_add(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
- * Hash it by fwmark in ip_vs_svc_fwm_table
+ * Hash it by fwmark in svc_fwm_table
*/
- hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+ hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
@@ -374,7 +323,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
/*
- * Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ * Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +335,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
}
if (svc->fwmark == 0) {
- /* Remove it from the ip_vs_svc_table table */
+ /* Remove it from the svc_table table */
list_del(&svc->s_list);
} else {
- /* Remove it from the ip_vs_svc_fwm_table table */
+ /* Remove it from the svc_fwm_table table */
list_del(&svc->f_list);
}
@@ -400,23 +349,24 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
/*
- * Get service by {proto,addr,port} in the service table.
+ * Get service by {netns, proto,addr,port} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
- __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+ const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
- hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+ hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
- && (svc->protocol == protocol)) {
+ && (svc->protocol == protocol)
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -430,16 +380,17 @@ __ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
- hash = ip_vs_svc_fwm_hashkey(fwmark);
+ hash = ip_vs_svc_fwm_hashkey(net, fwmark);
list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
- if (svc->fwmark == fwmark && svc->af == af) {
+ if (svc->fwmark == fwmark && svc->af == af
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -449,42 +400,44 @@ __ip_vs_svc_fwm_find(int af, __u32 fwmark)
}
struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
+ struct netns_ipvs *ipvs = net_ipvs(net);
read_lock(&__ip_vs_svc_lock);
/*
* Check the table hashed by fwmark first
*/
- if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
+ svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+ if (fwmark && svc)
goto out;
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
if (svc == NULL
&& protocol == IPPROTO_TCP
- && atomic_read(&ip_vs_ftpsvc_counter)
+ && atomic_read(&ipvs->ftpsvc_counter)
&& (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
- && atomic_read(&ip_vs_nullsvc_counter)) {
+ && atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
}
out:
@@ -519,6 +472,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
}
@@ -545,10 +499,10 @@ static inline unsigned ip_vs_rs_hashkey(int af,
}
/*
- * Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ * Hashes ip_vs_dest in rs_table by <proto,addr,port>.
* should be called with locked tables.
*/
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned hash;
@@ -562,19 +516,19 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
- list_add(&dest->d_list, &ip_vs_rtable[hash]);
+ list_add(&dest->d_list, &ipvs->rs_table[hash]);
return 1;
}
/*
- * UNhashes ip_vs_dest from ip_vs_rtable.
+ * UNhashes ip_vs_dest from rs_table.
* should be called with locked tables.
*/
static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
- * Remove it from the ip_vs_rtable table.
+ * Remove it from the rs_table table.
*/
if (!list_empty(&dest->d_list)) {
list_del(&dest->d_list);
@@ -588,10 +542,11 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
* Lookup real service by <proto,addr,port> in the real service table.
*/
struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr,
__be16 dport)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
unsigned hash;
struct ip_vs_dest *dest;
@@ -601,19 +556,19 @@ ip_vs_lookup_real_service(int af, __u16 protocol,
*/
hash = ip_vs_rs_hashkey(af, daddr, dport);
- read_lock(&__ip_vs_rs_lock);
- list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+ read_lock(&ipvs->rs_lock);
+ list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
if ((dest->af == af)
&& ip_vs_addr_equal(af, &dest->addr, daddr)
&& (dest->port == dport)
&& ((dest->protocol == protocol) ||
dest->vfwmark)) {
/* HIT */
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return dest;
}
}
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return NULL;
}
@@ -652,15 +607,16 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
 * ip_vs_lookup_real_service() looked promising, but
 * does not seem to work as expected.
*/
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
+ const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
- __be16 vport, __u16 protocol)
+ __be16 vport, __u16 protocol, __u32 fwmark)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
- svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+ svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +641,12 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/*
* Find the destination in trash
*/
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
@@ -720,6 +677,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -737,14 +695,16 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* are expired, and the refcnt of each destination in the trash must
* be 1, so we simply release them here.
*/
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -768,6 +728,7 @@ static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
int conn_flags;
/* set the weight and the flags */
@@ -780,12 +741,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/*
- * Put the real service in ip_vs_rtable if not present.
+ * Put the real service in rs_table if not present.
* For now only for NAT!
*/
- write_lock_bh(&__ip_vs_rs_lock);
- ip_vs_rs_hash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
}
atomic_set(&dest->conn_flags, conn_flags);
@@ -813,7 +774,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
spin_unlock(&dest->dst_lock);
if (add)
- ip_vs_new_estimator(&dest->stats);
+ ip_vs_new_estimator(svc->net, &dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
@@ -850,12 +811,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
- !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+ !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
return -EINVAL;
} else
#endif
{
- atype = inet_addr_type(&init_net, udest->addr.ip);
+ atype = inet_addr_type(svc->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
@@ -865,6 +826,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
}
+ dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!dest->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
dest->af = svc->af;
dest->protocol = svc->protocol;
@@ -888,6 +854,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
LeaveFunction(2);
return 0;
+
+err_alloc:
+ kfree(dest);
+ return -ENOMEM;
}
@@ -1006,16 +976,18 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete a destination (must be already unlinked from the service)
*/
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
{
- ip_vs_kill_estimator(&dest->stats);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_kill_estimator(net, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
- write_lock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
ip_vs_rs_unhash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_unlock_bh(&ipvs->rs_lock);
/*
* Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1006,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
and only one user context can update virtual service at a
time, so the operation here is OK */
atomic_dec(&dest->svc->refcnt);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
} else {
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1014,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
- list_add(&dest->n_list, &ip_vs_dest_trash);
+ list_add(&dest->n_list, &ipvs->dest_trash);
atomic_inc(&dest->refcnt);
}
}
@@ -1105,7 +1078,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete the destination
*/
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
LeaveFunction(2);
@@ -1117,13 +1090,14 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
* Add a service into the service hash table
*/
static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
@@ -1137,7 +1111,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1159,6 +1133,11 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ret = -ENOMEM;
goto out_err;
}
+ svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!svc->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ ret = -ENOMEM;
+ goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1151,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
+ svc->net = net;
INIT_LIST_HEAD(&svc->destinations);
rwlock_init(&svc->sched_lock);
@@ -1189,15 +1169,15 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
/* Update the virtual service counters */
if (svc->port == FTPPORT)
- atomic_inc(&ip_vs_ftpsvc_counter);
+ atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_inc(&ip_vs_nullsvc_counter);
+ atomic_inc(&ipvs->nullsvc_counter);
- ip_vs_new_estimator(&svc->stats);
+ ip_vs_new_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services++;
+ ipvs->num_services++;
/* Hash the service into the service table */
write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1187,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
*svc_p = svc;
return 0;
+
out_err:
if (svc != NULL) {
ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1196,8 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ip_vs_app_inc_put(svc->inc);
local_bh_enable();
}
+ if (svc->stats.cpustats)
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
ip_vs_scheduler_put(sched);
@@ -1248,7 +1231,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
old_sched = sched;
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1334,14 +1317,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services--;
+ ipvs->num_services--;
- ip_vs_kill_estimator(&svc->stats);
+ ip_vs_kill_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
@@ -1364,16 +1348,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
- atomic_dec(&ip_vs_ftpsvc_counter);
+ atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_dec(&ip_vs_nullsvc_counter);
+ atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
@@ -1383,6 +1367,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
@@ -1428,17 +1413,19 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
/*
* Flush all the virtual services
*/
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
{
int idx;
struct ip_vs_service *svc, *nxt;
/*
- * Flush the service table hashed by <protocol,addr,port>
+ * Flush the service table hashed by <netns,protocol,addr,port>
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
- ip_vs_unlink_service(svc);
+ list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+ s_list) {
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1448,7 +1435,8 @@ static int ip_vs_flush(void)
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_unlink_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1472,24 +1460,26 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
return 0;
}
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
{
int idx;
struct ip_vs_service *svc;
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
- ip_vs_zero_stats(&ip_vs_stats);
+ ip_vs_zero_stats(net_ipvs(net)->tot_stats);
return 0;
}
@@ -1498,6 +1488,7 @@ static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct net *net = current->nsproxy->net_ns;
int *valp = table->data;
int val = *valp;
int rc;
@@ -1508,7 +1499,7 @@ proc_do_defense_mode(ctl_table *table, int write,
/* Restore the correct value */
*valp = val;
} else {
- update_defense_level();
+ update_defense_level(net_ipvs(net));
}
}
return rc;
@@ -1534,45 +1525,54 @@ proc_do_sync_threshold(ctl_table *table, int write,
return rc;
}
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int *valp = table->data;
+ int val = *valp;
+ int rc;
+
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+ if ((*valp < 0) || (*valp > 1)) {
+ /* Restore the correct value */
+ *valp = val;
+ } else {
+ struct net *net = current->nsproxy->net_ns;
+ ip_vs_sync_switch_mode(net, val);
+ }
+ }
+ return rc;
+}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ * Do not change the order or insert new entries without
+ * aligning them with the netns init in __ip_vs_control_init()
*/
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
- .data = &sysctl_ip_vs_amemthresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#ifdef CONFIG_IP_VS_DEBUG
- {
- .procname = "debug_level",
- .data = &sysctl_ip_vs_debug_level,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{
.procname = "am_droprate",
- .data = &sysctl_ip_vs_am_droprate,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
- .data = &sysctl_ip_vs_drop_entry,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
- .data = &sysctl_ip_vs_drop_packet,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
@@ -1580,7 +1580,6 @@ static struct ctl_table vs_vars[] = {
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
- .data = &sysctl_ip_vs_conntrack,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
@@ -1588,18 +1587,62 @@ static struct ctl_table vs_vars[] = {
#endif
{
.procname = "secure_tcp",
- .data = &sysctl_ip_vs_secure_tcp,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
- .data = &sysctl_ip_vs_snat_reroute,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .procname = "sync_version",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_do_sync_mode,
+ },
+ {
+ .procname = "cache_bypass",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_nodest_conn",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_quiescent_template",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_threshold",
+ .maxlen =
+ sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+ .mode = 0644,
+ .proc_handler = proc_do_sync_threshold,
+ },
+ {
+ .procname = "nat_icmp_send",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_IP_VS_DEBUG
+ {
+ .procname = "debug_level",
+ .data = &sysctl_ip_vs_debug_level,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#if 0
{
.procname = "timeout_established",
@@ -1686,41 +1729,6 @@ static struct ctl_table vs_vars[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
- {
- .procname = "cache_bypass",
- .data = &sysctl_ip_vs_cache_bypass,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_nodest_conn",
- .data = &sysctl_ip_vs_expire_nodest_conn,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_quiescent_template",
- .data = &sysctl_ip_vs_expire_quiescent_template,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sync_threshold",
- .data = &sysctl_ip_vs_sync_threshold,
- .maxlen = sizeof(sysctl_ip_vs_sync_threshold),
- .mode = 0644,
- .proc_handler = proc_do_sync_threshold,
- },
- {
- .procname = "nat_icmp_send",
- .data = &sysctl_ip_vs_nat_icmp_send,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
{ }
};
@@ -1732,11 +1740,10 @@ const struct ctl_path net_vs_ctl_path[] = {
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
-static struct ctl_table_header * sysctl_header;
-
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
+ struct seq_net_private p; /* Do not move this, netns depends upon it */
struct list_head *table;
int bucket;
};
@@ -1763,6 +1770,7 @@ static inline const char *ip_vs_fwd_name(unsigned flags)
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
+ struct net *net = seq_file_net(seq);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
@@ -1770,7 +1778,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- if (pos-- == 0){
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
@@ -1781,7 +1789,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- if (pos-- == 0) {
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
@@ -1935,7 +1943,7 @@ static const struct seq_operations ip_vs_info_seq_ops = {
static int ip_vs_info_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &ip_vs_info_seq_ops,
+ return seq_open_net(inode, file, &ip_vs_info_seq_ops,
sizeof(struct ip_vs_iter));
}
@@ -1949,13 +1957,11 @@ static const struct file_operations ip_vs_info_fops = {
#endif
-struct ip_vs_stats ip_vs_stats = {
- .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
@@ -1963,29 +1969,29 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
- spin_lock_bh(&ip_vs_stats.lock);
- seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
- ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
- (unsigned long long) ip_vs_stats.ustats.inbytes,
- (unsigned long long) ip_vs_stats.ustats.outbytes);
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+ tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq,"%8X %8X %8X %16X %16X\n",
- ip_vs_stats.ustats.cps,
- ip_vs_stats.ustats.inpps,
- ip_vs_stats.ustats.outpps,
- ip_vs_stats.ustats.inbps,
- ip_vs_stats.ustats.outbps);
- spin_unlock_bh(&ip_vs_stats.lock);
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, ip_vs_stats_show, NULL);
+ return single_open_net(inode, file, ip_vs_stats_show);
}
static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2002,70 @@ static const struct file_operations ip_vs_stats_fops = {
.release = single_release,
};
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+ int i;
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Total Incoming Outgoing Incoming Outgoing\n");
+ seq_printf(seq,
+ "CPU Conns Packets Packets Bytes Bytes\n");
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+ seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+ i, u->ustats.conns, u->ustats.inpkts,
+ u->ustats.outpkts, (__u64)u->ustats.inbytes,
+ (__u64)u->ustats.outbytes);
+ }
+
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
+ tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+ tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+ seq_printf(seq, " %8X %8X %8X %16X %16X\n",
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
+
+ return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+ .owner = THIS_MODULE,
+ .open = ip_vs_stats_percpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
@@ -2010,19 +2073,22 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
@@ -2087,6 +2153,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
+ struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2188,20 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
- ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+ ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+ ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+ dm->syncid);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = stop_sync_thread(dm->state);
+ ret = stop_sync_thread(net, dm->state);
goto out_unlock;
}
@@ -2148,7 +2216,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
@@ -2165,10 +2233,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
- svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+ svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
- svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2249,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (svc != NULL)
ret = -EEXIST;
else
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
@@ -2241,7 +2309,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
}
static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+ const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
@@ -2252,7 +2321,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2271,7 +2340,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2291,7 +2360,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
}
static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
@@ -2299,9 +2368,9 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
int ret = 0;
if (get->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+ svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
get->port);
if (svc) {
@@ -2336,17 +2405,21 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
}
static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
#ifdef CONFIG_IP_VS_PROTO_TCP
- u->tcp_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
- u->tcp_fin_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+ u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
u->udp_timeout =
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+ pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
@@ -2375,7 +2448,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
+ struct net *net = sock_net(sk);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2418,7 +2494,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
- info.num_services = ip_vs_num_services;
+ info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
@@ -2437,7 +2513,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_service_entries(get, user);
+ ret = __ip_vs_get_service_entries(net, get, user);
}
break;
@@ -2450,10 +2526,11 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, entry->protocol,
- &addr, entry->port);
+ svc = __ip_vs_service_find(net, AF_INET,
+ entry->protocol, &addr,
+ entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2553,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_dest_entries(get, user);
+ ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
@@ -2484,7 +2561,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
@@ -2495,15 +2572,17 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
- if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+ if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
- d[0].syncid = ip_vs_master_syncid;
+ strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
+ d[0].syncid = ipvs->master_syncid;
}
- if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+ if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
- d[1].syncid = ip_vs_backup_syncid;
+ strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
+ d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
@@ -2542,6 +2621,7 @@ static struct genl_family ip_vs_genl_family = {
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_MAX,
+ .netnsok = true, /* Make ipvsadm work with netns */
};
/* Policy used for first-level command attributes */
@@ -2696,11 +2776,12 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2711,7 +2792,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2727,7 +2808,8 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+ struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
struct ip_vs_service **ret_svc)
{
@@ -2770,9 +2852,9 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
}
if (usvc->fwmark)
- svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
else
- svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+ svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
*ret_svc = svc;
@@ -2809,13 +2891,14 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
return 0;
}
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+ struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
- ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+ ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
return ret ? ERR_PTR(ret) : svc;
}
@@ -2883,6 +2966,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
@@ -2891,7 +2975,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
goto out_err;
- svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+ svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc) || svc == NULL)
goto out_err;
@@ -3005,20 +3090,23 @@ nla_put_failure:
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ struct net *net = skb_net(skb);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
mutex_lock(&__ip_vs_mutex);
- if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
- ip_vs_master_mcast_ifn,
- ip_vs_master_syncid, cb) < 0)
+ ipvs->master_mcast_ifn,
+ ipvs->master_syncid, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
- if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+ if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
- ip_vs_backup_mcast_ifn,
- ip_vs_backup_syncid, cb) < 0)
+ ipvs->backup_mcast_ifn,
+ ipvs->backup_syncid, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
@@ -3030,31 +3118,33 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
- return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+ return start_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
}
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ return stop_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
}
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3156,7 @@ static int ip_vs_genl_set_config(struct nlattr **attrs)
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
- return ip_vs_set_timeout(&t);
+ return ip_vs_set_timeout(net, &t);
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3166,20 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
- ret = ip_vs_genl_set_config(info->attrs);
+ ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_NEW_DAEMON ||
cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3195,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
}
if (cmd == IPVS_CMD_NEW_DAEMON)
- ret = ip_vs_genl_new_daemon(daemon_attrs);
+ ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
- ret = ip_vs_genl_del_daemon(daemon_attrs);
+ ret = ip_vs_genl_del_daemon(net, daemon_attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out;
}
@@ -3117,7 +3211,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = 1;
- ret = ip_vs_genl_parse_service(&usvc,
+ ret = ip_vs_genl_parse_service(net, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
@@ -3147,7 +3241,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
else
ret = -EEXIST;
break;
@@ -3185,7 +3279,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3312,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_service *svc;
- svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+ svc = ip_vs_genl_find_service(net,
+ info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
@@ -3234,7 +3333,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3479,173 @@ static void ip_vs_genl_unregister(void)
/* End of Generic Netlink interface definitions */
+/*
+ * per netns init/exit functions.
+ */
+int __net_init __ip_vs_control_init(struct net *net)
+{
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ctl_table *tbl;
+
+ atomic_set(&ipvs->dropentry, 0);
+ spin_lock_init(&ipvs->dropentry_lock);
+ spin_lock_init(&ipvs->droppacket_lock);
+ spin_lock_init(&ipvs->securetcp_lock);
+ ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+ /* Initialize rs_table */
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+ INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+ INIT_LIST_HEAD(&ipvs->dest_trash);
+ atomic_set(&ipvs->ftpsvc_counter, 0);
+ atomic_set(&ipvs->nullsvc_counter, 0);
+
+ /* procfs stats */
+ ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+ if (ipvs->tot_stats == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!ipvs->cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
+ spin_lock_init(&ipvs->tot_stats->lock);
+
+ proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+ proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+ proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+ &ip_vs_stats_percpu_fops);
+
+ if (!net_eq(net, &init_net)) {
+ tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
+ } else
+ tbl = vs_vars;
+ /* Initialize sysctl defaults */
+ idx = 0;
+ ipvs->sysctl_amemthresh = 1024;
+ tbl[idx++].data = &ipvs->sysctl_amemthresh;
+ ipvs->sysctl_am_droprate = 10;
+ tbl[idx++].data = &ipvs->sysctl_am_droprate;
+ tbl[idx++].data = &ipvs->sysctl_drop_entry;
+ tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+ tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+ tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+ ipvs->sysctl_snat_reroute = 1;
+ tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+ ipvs->sysctl_sync_ver = 1;
+ tbl[idx++].data = &ipvs->sysctl_sync_ver;
+ tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+ tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+ tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+ ipvs->sysctl_sync_threshold[0] = 3;
+ ipvs->sysctl_sync_threshold[1] = 50;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+ tbl);
+ if (ipvs->sysctl_hdr == NULL) {
+ if (!net_eq(net, &init_net))
+ kfree(tbl);
+ goto err_dup;
+ }
+#endif
+ ip_vs_new_estimator(net, ipvs->tot_stats);
+ ipvs->sysctl_tbl = tbl;
+ /* Schedule defense work */
+ INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+ return 0;
+
+err_dup:
+ free_percpu(ipvs->cpustats);
+err_alloc:
+ kfree(ipvs->tot_stats);
+ return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_trash_cleanup(net);
+ ip_vs_kill_estimator(net, ipvs->tot_stats);
+ cancel_delayed_work_sync(&ipvs->defense_work);
+ cancel_work_sync(&ipvs->defense_work.work);
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->sysctl_hdr);
+#endif
+ proc_net_remove(net, "ip_vs_stats_percpu");
+ proc_net_remove(net, "ip_vs_stats");
+ proc_net_remove(net, "ip_vs");
+ free_percpu(ipvs->cpustats);
+ kfree(ipvs->tot_stats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+ .init = __ip_vs_control_init,
+ .exit = __ip_vs_control_cleanup,
+};
int __init ip_vs_control_init(void)
{
- int ret;
int idx;
+ int ret;
EnterFunction(2);
- /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+ /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
- for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+ ret = register_pernet_subsys(&ipvs_control_ops);
+ if (ret) {
+ pr_err("cannot register namespace.\n");
+ goto err;
}
- smp_wmb();
+
+ smp_wmb(); /* Do we really need it now? */
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
- return ret;
+ goto err_net;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
nf_unregister_sockopt(&ip_vs_sockopts);
- return ret;
+ goto err_net;
}
- proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
- proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
- ip_vs_new_estimator(&ip_vs_stats);
-
- /* Hook the defense timer */
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
LeaveFunction(2);
return 0;
+
+err_net:
+ unregister_pernet_subsys(&ipvs_control_ops);
+err:
+ return ret;
}
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
- ip_vs_trash_cleanup();
- cancel_delayed_work_sync(&defense_work);
- cancel_work_sync(&defense_work.work);
- ip_vs_kill_estimator(&ip_vs_stats);
- unregister_sysctl_table(sysctl_header);
- proc_net_remove(&init_net, "ip_vs_stats");
- proc_net_remove(&init_net, "ip_vs");
+ unregister_pernet_subsys(&ipvs_control_ops);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
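
The ip_vs_ctl.c conversion above hangs its formerly global state off struct netns_ipvs and registers per-namespace init/exit hooks with register_pernet_subsys(). A minimal sketch of that pernet_operations pattern, stripped of the IPVS specifics, is given below; the my_subsys names and the pr_info() bodies are placeholders, not part of this patch.

/*
 * Hedged sketch of the pernet_operations pattern used throughout this
 * series: per-namespace state is created in .init and torn down in
 * .exit.  "my_subsys" is a hypothetical name, not an IPVS symbol.
 */
#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init my_subsys_net_init(struct net *net)
{
	/* allocate/initialize per-netns state here (tables, timers, ...) */
	pr_info("my_subsys: init for netns %p\n", net);
	return 0;
}

static void __net_exit my_subsys_net_exit(struct net *net)
{
	/* free everything allocated in the init hook */
	pr_info("my_subsys: exit for netns %p\n", net);
}

static struct pernet_operations my_subsys_net_ops = {
	.init = my_subsys_net_init,
	.exit = my_subsys_net_exit,
};

static int __init my_subsys_init(void)
{
	/* the init hook then runs for every existing and future netns */
	return register_pernet_subsys(&my_subsys_net_ops);
}

static void __exit my_subsys_exit(void)
{
	unregister_pernet_subsys(&my_subsys_net_ops);
}

module_init(my_subsys_init);
module_exit(my_subsys_exit);
MODULE_LICENSE("GPL");

register_pernet_subsys() runs the init hook for every namespace that already exists and for each one created later, and unregister_pernet_subsys() runs the exit hook for all of them, which is why ip_vs_control_cleanup() above no longer tears the per-net pieces down by hand.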
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ff28801962e0..f560a05c965a 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -8,8 +8,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
- *
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
+ * Global data moved to netns, i.e. struct netns_ipvs.
+ * Affected data: est_list and est_lock.
+ * estimation_timer() now runs with a timer per netns.
+ * get_stats() does the per-cpu summing.
*/
#define KMSG_COMPONENT "IPVS"
@@ -48,11 +52,42 @@
*/
-static void estimation_timer(unsigned long arg);
+/*
+ * Make a summary from each cpu
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+ struct ip_vs_cpu_stats *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+ unsigned int start;
+ __u64 inbytes, outbytes;
+ if (i) {
+ sum->conns += s->ustats.conns;
+ sum->inpkts += s->ustats.inpkts;
+ sum->outpkts += s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ inbytes = s->ustats.inbytes;
+ outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ sum->inbytes += inbytes;
+ sum->outbytes += outbytes;
+ } else {
+ sum->conns = s->ustats.conns;
+ sum->inpkts = s->ustats.inpkts;
+ sum->outpkts = s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ sum->inbytes = s->ustats.inbytes;
+ sum->outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ }
+ }
+}
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
@@ -62,11 +97,16 @@ static void estimation_timer(unsigned long arg)
u32 n_inpkts, n_outpkts;
u64 n_inbytes, n_outbytes;
u32 rate;
+ struct net *net = (struct net *)arg;
+ struct netns_ipvs *ipvs;
- spin_lock(&est_lock);
- list_for_each_entry(e, &est_list, list) {
+ ipvs = net_ipvs(net);
+ ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
+ spin_lock(&ipvs->est_lock);
+ list_for_each_entry(e, &ipvs->est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
+ ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
spin_lock(&s->lock);
n_conns = s->ustats.conns;
n_inpkts = s->ustats.inpkts;
@@ -75,38 +115,39 @@ static void estimation_timer(unsigned long arg)
n_outbytes = s->ustats.outbytes;
/* scaled by 2^10, but divided 2 seconds */
- rate = (n_conns - e->last_conns)<<9;
+ rate = (n_conns - e->last_conns) << 9;
e->last_conns = n_conns;
- e->cps += ((long)rate - (long)e->cps)>>2;
- s->ustats.cps = (e->cps+0x1FF)>>10;
+ e->cps += ((long)rate - (long)e->cps) >> 2;
+ s->ustats.cps = (e->cps + 0x1FF) >> 10;
- rate = (n_inpkts - e->last_inpkts)<<9;
+ rate = (n_inpkts - e->last_inpkts) << 9;
e->last_inpkts = n_inpkts;
- e->inpps += ((long)rate - (long)e->inpps)>>2;
- s->ustats.inpps = (e->inpps+0x1FF)>>10;
+ e->inpps += ((long)rate - (long)e->inpps) >> 2;
+ s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
- rate = (n_outpkts - e->last_outpkts)<<9;
+ rate = (n_outpkts - e->last_outpkts) << 9;
e->last_outpkts = n_outpkts;
- e->outpps += ((long)rate - (long)e->outpps)>>2;
- s->ustats.outpps = (e->outpps+0x1FF)>>10;
+ e->outpps += ((long)rate - (long)e->outpps) >> 2;
+ s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
- rate = (n_inbytes - e->last_inbytes)<<4;
+ rate = (n_inbytes - e->last_inbytes) << 4;
e->last_inbytes = n_inbytes;
- e->inbps += ((long)rate - (long)e->inbps)>>2;
- s->ustats.inbps = (e->inbps+0xF)>>5;
+ e->inbps += ((long)rate - (long)e->inbps) >> 2;
+ s->ustats.inbps = (e->inbps + 0xF) >> 5;
- rate = (n_outbytes - e->last_outbytes)<<4;
+ rate = (n_outbytes - e->last_outbytes) << 4;
e->last_outbytes = n_outbytes;
- e->outbps += ((long)rate - (long)e->outbps)>>2;
- s->ustats.outbps = (e->outbps+0xF)>>5;
+ e->outbps += ((long)rate - (long)e->outbps) >> 2;
+ s->ustats.outbps = (e->outbps + 0xF) >> 5;
spin_unlock(&s->lock);
}
- spin_unlock(&est_lock);
- mod_timer(&est_timer, jiffies + 2*HZ);
+ spin_unlock(&ipvs->est_lock);
+ mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
}
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
INIT_LIST_HEAD(&est->list);
@@ -126,18 +167,19 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->ustats.outbytes;
est->outbps = stats->ustats.outbps<<5;
- spin_lock_bh(&est_lock);
- list_add(&est->list, &est_list);
- spin_unlock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
+ list_add(&est->list, &ipvs->est_list);
+ spin_unlock_bh(&ipvs->est_lock);
}
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
- spin_lock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
list_del(&est->list);
- spin_unlock_bh(&est_lock);
+ spin_unlock_bh(&ipvs->est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
@@ -157,13 +199,35 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
est->outbps = 0;
}
-int __init ip_vs_estimator_init(void)
+static int __net_init __ip_vs_estimator_init(struct net *net)
{
- mod_timer(&est_timer, jiffies + 2 * HZ);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->est_list);
+ spin_lock_init(&ipvs->est_lock);
+ setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+ mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
return 0;
}
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+ del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_app_ops = {
+ .init = __ip_vs_estimator_init,
+ .exit = __ip_vs_estimator_exit,
+};
+
+int __init ip_vs_estimator_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_app_ops);
+ return rv;
+}
+
void ip_vs_estimator_cleanup(void)
{
- del_timer_sync(&est_timer);
+ unregister_pernet_subsys(&ip_vs_app_ops);
}
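
ip_vs_read_cpu_stats() above folds the per-CPU counters into the ustats summary, protecting the 64-bit byte counters with a u64_stats seqcount so that readers on 32-bit SMP never see torn values. The sketch below shows the same read-side technique in isolation; the demo_* names are illustrative, and the packet counter is deliberately read as a plain 32-bit value, mirroring the hunk above.

/*
 * Hedged sketch of per-cpu counter summation with u64_stats_sync.
 * demo_cpu_stats is a stand-in for struct ip_vs_cpu_stats.
 */
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_cpu_stats {
	u64 bytes;
	u32 packets;
	struct u64_stats_sync syncp;
};

static void demo_sum_stats(struct demo_cpu_stats __percpu *pcpu,
			   u64 *bytes, u32 *packets)
{
	int cpu;

	*bytes = 0;
	*packets = 0;
	for_each_possible_cpu(cpu) {
		struct demo_cpu_stats *s = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 b;

		/* retry if the writer updated the 64-bit field meanwhile */
		do {
			start = u64_stats_fetch_begin_bh(&s->syncp);
			b = s->bytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));

		*bytes += b;
		*packets += s->packets;	/* 32-bit read, no seqcount needed */
	}
}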
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 75455000ad1c..6b5dd6ddaae9 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -157,6 +157,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
int ret = 0;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol,
- &from, port, &cp->caddr, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &from, port,
+ &cp->caddr, 0, &p);
n_cp = ip_vs_conn_out_get(&p);
}
if (!n_cp) {
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+ AF_INET, IPPROTO_TCP, &cp->caddr,
0, &cp->vaddr, port, &p);
n_cp = ip_vs_conn_new(&p, &from, port,
IP_VS_CONN_F_NO_CPORT |
IP_VS_CONN_F_NFCT,
- cp->dest);
+ cp->dest, skb->mark);
if (!n_cp)
return 0;
@@ -257,8 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
* would be adjusted twice.
*/
+ net = skb_net(skb);
cp->app_data = NULL;
- ip_vs_tcp_conn_listen(n_cp);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return ret;
}
@@ -287,6 +291,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
- &cp->vaddr, htons(ntohs(cp->vport)-1),
- &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &to, port, &cp->vaddr,
+ htons(ntohs(cp->vport)-1), &p);
n_cp = ip_vs_conn_in_get(&p);
if (!n_cp) {
n_cp = ip_vs_conn_new(&p, &cp->daddr,
htons(ntohs(cp->dport)-1),
- IP_VS_CONN_F_NFCT, cp->dest);
+ IP_VS_CONN_F_NFCT, cp->dest,
+ skb->mark);
if (!n_cp)
return 0;
@@ -377,7 +383,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
/*
* Move tunnel to listen state
*/
- ip_vs_tcp_conn_listen(n_cp);
+ net = skb_net(skb);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return 1;
@@ -398,23 +405,22 @@ static struct ip_vs_app ip_vs_ftp = {
.pkt_in = ip_vs_ftp_in,
};
-
/*
- * ip_vs_ftp initialization
+ * per netns ip_vs_ftp initialization
*/
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
struct ip_vs_app *app = &ip_vs_ftp;
- ret = register_ip_vs_app(app);
+ ret = register_ip_vs_app(net, app);
if (ret)
return ret;
for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
- ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+ ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
if (ret)
break;
pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@ static int __init ip_vs_ftp_init(void)
}
if (ret)
- unregister_ip_vs_app(app);
+ unregister_ip_vs_app(net, app);
return ret;
}
+/*
+ * netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+ struct ip_vs_app *app = &ip_vs_ftp;
+
+ unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+ .init = __ip_vs_ftp_init,
+ .exit = __ip_vs_ftp_exit,
+};
+int __init ip_vs_ftp_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_ftp_ops);
+ return rv;
+}
/*
* ip_vs_ftp finish.
*/
static void __exit ip_vs_ftp_exit(void)
{
- unregister_ip_vs_app(&ip_vs_ftp);
+ unregister_pernet_subsys(&ip_vs_ftp_ops);
}
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9323f8944199..00b5ffab3768 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -70,7 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
/*
@@ -117,7 +116,7 @@ struct ip_vs_lblc_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblc_expiration",
- .data = &sysctl_ip_vs_lblc_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -125,8 +124,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
list_del(&en->list);
@@ -248,6 +245,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies;
int i, j;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -255,7 +253,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now,
- en->lastuse + sysctl_ip_vs_lblc_expiration))
+ en->lastuse +
+ ipvs->sysctl_lblc_expiration))
continue;
ip_vs_lblc_free(en);
@@ -543,23 +542,73 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.schedule = ip_vs_lblc_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblc_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblc_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+ ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblc_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblc_ctl_table);
+ if (!ipvs->lblc_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblc_ops = {
+ .init = __ip_vs_lblc_init,
+ .exit = __ip_vs_lblc_exit,
+};
static int __init ip_vs_lblc_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblc_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
return ret;
}
-
static void __exit ip_vs_lblc_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
}
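
__ip_vs_lblc_init() above duplicates the sysctl template with kmemdup() for every namespace other than init_net, points each entry's .data at that namespace's own variable, and registers the copy with register_net_sysctl_table(). A stand-alone sketch of that pattern follows, under the assumption of hypothetical demo_* names; it is not IPVS code.

/*
 * Hedged sketch: per-netns sysctl registration from a shared template.
 */
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table demo_vars_template[] = {
	{
		.procname	= "demo_expiration",
		.data		= NULL,		/* filled in per netns */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* assumed per-netns state; in IPVS this lives in struct netns_ipvs */
struct demo_net {
	int expiration;
	struct ctl_table *tbl;
	struct ctl_table_header *hdr;
};

static int demo_sysctl_init(struct net *net, struct demo_net *dn,
			    const struct ctl_path *path)
{
	if (!net_eq(net, &init_net)) {
		/* each non-initial netns gets its own writable copy */
		dn->tbl = kmemdup(demo_vars_template,
				  sizeof(demo_vars_template), GFP_KERNEL);
		if (!dn->tbl)
			return -ENOMEM;
	} else {
		dn->tbl = demo_vars_template;
	}

	dn->expiration = 24 * 60 * 60 * HZ;
	dn->tbl[0].data = &dn->expiration;

	dn->hdr = register_net_sysctl_table(net, path, dn->tbl);
	if (!dn->hdr) {
		if (!net_eq(net, &init_net))
			kfree(dn->tbl);
		return -ENOMEM;
	}
	return 0;
}

The exit path mirrors this: unregister_net_sysctl_table() first, then kfree() of the duplicated table for every namespace except init_net, as in __ip_vs_lblc_exit() above.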
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index dbeed8ea421a..bfa25f1ea9e4 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -70,8 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
/*
* for IPVS lblcr entry hash table
@@ -296,7 +294,7 @@ struct ip_vs_lblcr_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblcr_expiration",
- .data = &sysctl_ip_vs_lblcr_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -304,8 +302,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
list_del(&en->list);
@@ -425,14 +421,15 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
unsigned long now = jiffies;
int i, j;
struct ip_vs_lblcr_entry *en, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
- if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
- now))
+ if (time_after(en->lastuse
+ + ipvs->sysctl_lblcr_expiration, now))
continue;
ip_vs_lblcr_free(en);
@@ -664,6 +661,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
read_lock(&svc->sched_lock);
en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
if (en) {
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/* We only hold a read lock, but this is atomic */
en->lastuse = jiffies;
@@ -675,7 +673,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* More than one destination + enough time passed by, cleanup */
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
- sysctl_ip_vs_lblcr_expiration)) {
+ ipvs->sysctl_lblcr_expiration)) {
struct ip_vs_dest *m;
write_lock(&en->set.lock);
@@ -744,23 +742,73 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.schedule = ip_vs_lblcr_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblcr_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblcr_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+ ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblcr_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblcr_ctl_table);
+ if (!ipvs->lblcr_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+ .init = __ip_vs_lblcr_init,
+ .exit = __ip_vs_lblcr_exit,
+};
static int __init ip_vs_lblcr_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
return ret;
}
-
static void __exit ip_vs_lblcr_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
}
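
Both scheduler files above follow the same per-netns sysctl recipe: duplicate the template ctl_table for every namespace except init_net, point .data at the per-netns field, register it with register_net_sysctl_table(), and free the copy on exit. A condensed sketch of that recipe follows, with error unwinding of the copied table omitted for brevity; example_pernet_init() is illustrative and not part of the patch.

/* Condensed sketch of the per-netns sysctl pattern used by the lblc
 * and lblcr hunks above; example_pernet_init() is illustrative only. */
static int __net_init example_pernet_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!net_eq(net, &init_net)) {
		/* every non-init netns gets its own writable table copy */
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;
	} else {
		/* init_net may keep using the static template */
		ipvs->lblcr_ctl_table = vs_vars_table;
	}
	/* .data must point at the per-netns value, not at a global */
	ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header =
		register_net_sysctl_table(net, net_vs_ctl_path,
					  ipvs->lblcr_ctl_table);
	return ipvs->lblcr_ctl_header ? 0 : -ENOMEM;
}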
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 4680647cd450..f454c80df0a7 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -141,6 +141,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
struct nf_conntrack_tuple *orig, new_reply;
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = nf_ct_net(ct);
if (exp->tuple.src.l3num != PF_INET)
return;
@@ -155,7 +156,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
/* RS->CLIENT */
orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+ ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
&orig->src.u3, orig->src.u.tcp.port,
&orig->dst.u3, orig->dst.u.tcp.port, &p);
cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
" for conn " FMT_CONN "\n",
__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
- h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+ &tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
/* Show what happens instead of calling nf_ct_kill() */
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 3414af70ee12..5cf859ccb31b 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -29,12 +29,11 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc)
}
/* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
{
struct ip_vs_pe *pe;
- IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+ IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
pe_name);
spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@ ip_vs_pe_getbyname(const char *pe_name)
}
/* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
{
struct ip_vs_pe *pe;
/* Search for the pe by name */
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
/* If pe not found, load the module and search again */
if (!pe) {
request_module("ip_vs_pe_%s", name);
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
}
return pe;
}
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
- if (pe && pe->module)
- module_put(pe->module);
-}
-
/* Register a pe in the pe list */
int register_ip_vs_pe(struct ip_vs_pe *pe)
{
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index b8b4e9620f3e..0d83bc01fed4 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -71,6 +71,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
struct ip_vs_iphdr iph;
unsigned int dataoff, datalen, matchoff, matchlen;
const char *dptr;
+ int retc;
ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
@@ -83,6 +84,8 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
if (dataoff >= skb->len)
return -EINVAL;
+ if ((retc = skb_linearize(skb)) < 0)
+ return retc;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index c53998390877..17484a4416ef 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -60,6 +60,35 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+ defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+ defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ * register an ipvs protocols netns related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ struct ip_vs_proto_data *pd =
+ kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+ if (!pd) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ pd->pp = pp; /* For speed issues */
+ pd->next = ipvs->proto_data_table[hash];
+ ipvs->proto_data_table[hash] = pd;
+ atomic_set(&pd->appcnt, 0); /* Init app counter */
+
+ if (pp->init_netns != NULL)
+ pp->init_netns(net, pd);
+
+ return 0;
+}
+#endif
/*
* unregister an ipvs protocol
@@ -82,6 +111,29 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
return -ESRCH;
}
+/*
+ * unregister an ipvs protocols netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data **pd_p;
+ unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+ pd_p = &ipvs->proto_data_table[hash];
+ for (; *pd_p; pd_p = &(*pd_p)->next) {
+ if (*pd_p == pd) {
+ *pd_p = pd->next;
+ if (pd->pp->exit_netns != NULL)
+ pd->pp->exit_netns(net, pd);
+ kfree(pd);
+ return 0;
+ }
+ }
+
+ return -ESRCH;
+}
/*
* get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
}
EXPORT_SYMBOL(ip_vs_proto_get);
+/*
+ * get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+ struct ip_vs_proto_data *pd;
+ unsigned hash = IP_VS_PROTO_HASH(proto);
+
+ for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+ if (pd->pp->protocol == proto)
+ return pd;
+ }
+
+ return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
/*
* Propagate event for state change to all protocols
*/
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
- struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
int i;
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
- for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
- if (pp->timeout_change)
- pp->timeout_change(pp, flags);
+ for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+ if (pd->pp->timeout_change)
+ pd->pp->timeout_change(pd, flags);
}
}
}
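
ip_vs_proto_data_get() above is what the per-protocol hunks later in this patch use to reach their per-netns state (app counters, timeout tables). A minimal usage sketch under that assumption; the example_* helper is illustrative and not part of the patch.

/* Sketch only: fetch the per-netns TCP proto data and read one entry of
 * its timeout table, which the init_netns hook is assumed to have set up. */
static int example_tcp_established_timeout(struct net *net)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);

	if (!pd || !pd->timeout_table)	/* netns data not (yet) registered */
		return 0;
	return pd->timeout_table[IP_VS_TCP_S_ESTABLISHED];
}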
@@ -236,6 +313,46 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+ return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd;
+ int i;
+
+ /* unregister all the ipvs proto data for this netns */
+ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+ while ((pd = ipvs->proto_data_table[i]) != NULL)
+ unregister_ip_vs_proto_netns(net, pd);
+ }
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+ .init = __ip_vs_protocol_init,
+ .exit = __ip_vs_protocol_cleanup,
+};
int __init ip_vs_protocol_init(void)
{
@@ -265,6 +382,7 @@ int __init ip_vs_protocol_init(void)
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
+ return register_pernet_subsys(&ipvs_proto_ops);
return 0;
}
@@ -275,6 +393,7 @@ void ip_vs_protocol_cleanup(void)
struct ip_vs_protocol *pp;
int i;
+ unregister_pernet_subsys(&ipvs_proto_ops);
/* unregister all the ipvs protocols */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pp = ip_vs_proto_table[i]) != NULL)
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 3a0461117d3f..5b8eb8b12c3e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,28 +41,30 @@ struct isakmp_hdr {
#define PORT_ISAKMP 500
static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
- int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+ const struct ip_vs_iphdr *iph, int inverse,
+ struct ip_vs_conn_param *p)
{
if (likely(!inverse))
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->saddr, htons(PORT_ISAKMP),
&iph->daddr, htons(PORT_ISAKMP), p);
else
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->daddr, htons(PORT_ISAKMP),
&iph->saddr, htons(PORT_ISAKMP), p);
}
static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph, unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_in_get(&p);
if (!cp) {
/*
@@ -72,7 +74,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -83,21 +85,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
static struct ip_vs_conn *
ah_esp_conn_out_get(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_out_get(&p);
if (!cp) {
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -107,7 +109,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
/*
@@ -117,26 +119,14 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
return 0;
}
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
@@ -149,7 +139,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
- .set_state_timeout = NULL,
};
#endif
@@ -159,8 +148,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1ea96bcd342b..fb2d04ac5d4e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,9 +9,10 @@
#include <net/ip_vs.h>
static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
sctp_chunkhdr_t _schunkh, *sch;
sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
sizeof(_schunkh), &_schunkh);
if (sch == NULL)
return 0;
-
+ net = skb_net(skb);
if ((sch->type == SCTP_CID_INIT) &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, sh->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -46,14 +47,19 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
-
+ /* NF_ACCEPT */
return 1;
}
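
The reworked scheduling error path above (repeated for TCP and UDP later in this patch) folds three outcomes of ip_vs_schedule() into *cpp and the ignored flag. A hedged restatement as a stand-alone sketch; example_schedule_verdict() does not exist in the tree.

/* Sketch of the verdict logic shared by the sctp/tcp/udp conn_schedule
 * hunks in this patch; not a function from the tree. */
static int example_schedule_verdict(struct ip_vs_conn *cp, int ignored,
				    int *verdict)
{
	if (cp)			/* connection created: keep processing */
		return 1;	/* NF_ACCEPT */
	if (ignored > 0)	/* scheduler asked IPVS to step aside */
		return 1;	/* NF_ACCEPT */
	if (ignored == 0) {	/* could not schedule: let ip_vs_leave decide */
		/* *verdict = ip_vs_leave(svc, skb, pd); */
		return 0;
	}
	*verdict = NF_DROP;	/* ignored < 0: hard error, drop the packet */
	return 0;
}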
@@ -856,7 +862,7 @@ static struct ipvs_sctp_nextstate
/*
* Timeout table[state]
*/
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
[IP_VS_SCTP_S_NONE] = 2 * HZ,
[IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ,
[IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ,
@@ -900,20 +906,8 @@ static const char *sctp_state_name(int state)
return "?";
}
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
- sctp_state_name_table, sname, to);
-}
-
static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, const struct sk_buff *skb)
{
sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((direction == IP_VS_DIR_OUTPUT) ?
"output " : "input "),
IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
}
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = next_state];
+ else /* What to do ? */
+ cp->timeout = sctp_timeouts[cp->state = next_state];
- cp->timeout = pp->timeout_table[cp->state = next_state];
-
- return 1;
+ return 1;
}
static int
sctp_state_transition(struct ip_vs_conn *cp, int direction,
- const struct sk_buff *skb, struct ip_vs_protocol *pp)
+ const struct sk_buff *skb, struct ip_vs_proto_data *pd)
{
int ret = 0;
spin_lock(&cp->lock);
- ret = set_sctp_state(pp, cp, direction, skb);
+ ret = set_sctp_state(pd, cp, direction, skb);
spin_unlock(&cp->lock);
return ret;
}
-/*
- * Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS 4
-#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
static inline __u16 sctp_app_hashkey(__be16 port)
{
return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
& SCTP_APP_TAB_MASK;
}
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
hash = sctp_app_hashkey(port);
- spin_lock_bh(&sctp_app_lock);
- list_for_each_entry(i, &sctp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &sctp_apps[hash]);
- atomic_inc(&ip_vs_protocol_sctp.appcnt);
+ list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
return ret;
}
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&sctp_app_lock);
- atomic_dec(&ip_vs_protocol_sctp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
}
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -1074,12 +1066,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = sctp_app_hashkey(cp->vport);
- spin_lock(&sctp_app_lock);
- list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+ spin_lock(&ipvs->sctp_app_lock);
+ list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
out:
return result;
}
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(sctp_apps);
- pp->timeout_table = sctp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->sctp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+ sizeof(sctp_timeouts));
+}
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
-
+ kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_sctp = {
- .name = "SCTP",
- .protocol = IPPROTO_SCTP,
- .num_states = IP_VS_SCTP_S_LAST,
- .dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_sctp_init,
- .exit = ip_vs_sctp_exit,
- .register_app = sctp_register_app,
+ .name = "SCTP",
+ .protocol = IPPROTO_SCTP,
+ .num_states = IP_VS_SCTP_S_LAST,
+ .dont_defrag = 0,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_sctp_init,
+ .exit_netns = __ip_vs_sctp_exit,
+ .register_app = sctp_register_app,
.unregister_app = sctp_unregister_app,
- .conn_schedule = sctp_conn_schedule,
- .conn_in_get = ip_vs_conn_in_get_proto,
- .conn_out_get = ip_vs_conn_out_get_proto,
- .snat_handler = sctp_snat_handler,
- .dnat_handler = sctp_dnat_handler,
- .csum_check = sctp_csum_check,
- .state_name = sctp_state_name,
+ .conn_schedule = sctp_conn_schedule,
+ .conn_in_get = ip_vs_conn_in_get_proto,
+ .conn_out_get = ip_vs_conn_out_get_proto,
+ .snat_handler = sctp_snat_handler,
+ .dnat_handler = sctp_dnat_handler,
+ .csum_check = sctp_csum_check,
+ .state_name = sctp_state_name,
.state_transition = sctp_state_transition,
- .app_conn_bind = sctp_app_conn_bind,
- .debug_packet = ip_vs_tcpudp_debug_packet,
- .timeout_change = sctp_timeout_change,
- .set_state_timeout = sctp_set_state_timeout,
+ .app_conn_bind = sctp_app_conn_bind,
+ .debug_packet = ip_vs_tcpudp_debug_packet,
+ .timeout_change = NULL,
};
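
__ip_vs_sctp_init() obtains a per-netns timeout table from ip_vs_create_timeout_table() and the exit hook kfree()s it; the helper itself is not shown in this hunk. Assuming it simply duplicates the const template, a minimal implementation would be:

/* Assumption: ip_vs_create_timeout_table() is not part of this hunk;
 * a minimal per-netns copy of the const template would look like this,
 * which is consistent with the kfree(pd->timeout_table) in each
 * exit_netns hook. */
int *ip_vs_create_timeout_table(int *table, int size)
{
	return kmemdup(table, size, GFP_ATOMIC);
}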
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index f6c5200e2146..c0cc341b840d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -9,8 +9,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
*
+ * Network name space (netns) aware.
+ * Global data moved to netns, i.e. struct netns_ipvs.
+ * The tcp_timeouts table now has a per-netns copy, kept in the
+ * per-protocol ip_vs_proto_data and handled per netns.
*/
#define KMSG_COMPONENT "IPVS"
@@ -28,9 +32,10 @@
#include <net/ip_vs.h>
static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct tcphdr _tcph, *th;
struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
+ net = skb_net(skb);
/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
if (th->syn &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
- th->dest))) {
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+ &iph.daddr, th->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -63,13 +68,19 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,7 +349,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = {
/*
* Timeout table[state]
*/
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = 2*HZ,
[IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
[IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
@@ -437,10 +448,7 @@ static struct tcp_states_t tcp_states_dos [] = {
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
int on = (flags & 1); /* secure_tcp */
@@ -450,14 +458,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
- tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
- tcp_state_name_table, sname, to);
+ pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@ static inline int tcp_state_idx(struct tcphdr *th)
}
static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, struct tcphdr *th)
{
int state_idx;
@@ -497,7 +498,8 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
goto tcp_state_out;
}
- new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+ new_state =
+ pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
tcp_state_out:
if (new_state != cp->state) {
@@ -505,7 +507,7 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
- cp->timeout = pp->timeout_table[cp->state = new_state];
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = new_state];
+ else /* What to do ? */
+ cp->timeout = tcp_timeouts[cp->state = new_state];
}
-
/*
* Handle state transitions
*/
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
struct tcphdr _tcph, *th;
@@ -560,23 +564,12 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
return 0;
spin_lock(&cp->lock);
- set_tcp_state(pp, cp, direction, th);
+ set_tcp_state(pd, cp, direction, th);
spin_unlock(&cp->lock);
return 1;
}
-
-/*
- * Hash table for TCP application incarnations
- */
-#define TCP_APP_TAB_BITS 4
-#define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
-#define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
static inline __u16 tcp_app_hashkey(__be16 port)
{
return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@ static inline __u16 tcp_app_hashkey(__be16 port)
}
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
hash = tcp_app_hashkey(port);
- spin_lock_bh(&tcp_app_lock);
- list_for_each_entry(i, &tcp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &tcp_apps[hash]);
- atomic_inc(&ip_vs_protocol_tcp.appcnt);
+ list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
return ret;
}
static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&tcp_app_lock);
- atomic_dec(&ip_vs_protocol_tcp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
}
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -633,12 +632,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = tcp_app_hashkey(cp->vport);
- spin_lock(&tcp_app_lock);
- list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+ spin_lock(&ipvs->tcp_app_lock);
+ list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
out:
return result;
@@ -665,24 +664,35 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/*
* Set LISTEN timeout. (ip_vs_conn_put will setup timer)
*/
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
{
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
spin_lock(&cp->lock);
cp->state = IP_VS_TCP_S_LISTEN;
- cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+ cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+ : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
spin_unlock(&cp->lock);
}
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(tcp_apps);
- pp->timeout_table = tcp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->tcp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+ sizeof(tcp_timeouts));
+ pd->tcp_state_table = tcp_states;
+}
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -691,9 +701,10 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.protocol = IPPROTO_TCP,
.num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_tcp_init,
- .exit = ip_vs_tcp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_tcp_init,
+ .exit_netns = __ip_vs_tcp_exit,
.register_app = tcp_register_app,
.unregister_app = tcp_unregister_app,
.conn_schedule = tcp_conn_schedule,
@@ -707,5 +718,4 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.app_conn_bind = tcp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = tcp_timeout_change,
- .set_state_timeout = tcp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 9d106a06bb0a..f1282cbe6fe3 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -9,7 +9,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
*
*/
@@ -28,9 +29,10 @@
#include <net/ip6_checksum.h>
static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct udphdr _udph, *uh;
struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
- svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ net = skb_net(skb);
+ svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, uh->dest);
if (svc) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -62,13 +64,19 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,19 +346,6 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 1;
}
-
-/*
- * Note: the caller guarantees that only one of register_app,
- * unregister_app or app_conn_bind is called each time.
- */
-
-#define UDP_APP_TAB_BITS 4
-#define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
-#define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
static inline __u16 udp_app_hashkey(__be16 port)
{
return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@ static inline __u16 udp_app_hashkey(__be16 port)
}
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
hash = udp_app_hashkey(port);
- spin_lock_bh(&udp_app_lock);
- list_for_each_entry(i, &udp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->udp_app_lock);
+ list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &udp_apps[hash]);
- atomic_inc(&ip_vs_protocol_udp.appcnt);
+ list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
return ret;
}
static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&udp_app_lock);
- atomic_dec(&ip_vs_protocol_udp.appcnt);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ spin_lock_bh(&ipvs->udp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
}
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -407,12 +408,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = udp_app_hashkey(cp->vport);
- spin_lock(&udp_app_lock);
- list_for_each_entry(inc, &udp_apps[hash], p_list) {
+ spin_lock(&ipvs->udp_app_lock);
+ list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
out:
return result;
}
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = 5*60*HZ,
[IP_VS_UDP_S_LAST] = 2*HZ,
};
@@ -446,14 +447,6 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_LAST] = "BUG!",
};
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
- udp_state_name_table, sname, to);
-}
-
static const char * udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@ static const char * udp_state_name(int state)
static int
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+ if (unlikely(!pd)) {
+ pr_err("UDP no ns data\n");
+ return 0;
+ }
+
+ cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
return 1;
}
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(udp_apps);
- pp->timeout_table = udp_timeouts;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->udp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+ sizeof(udp_timeouts));
}
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -486,8 +489,10 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.protocol = IPPROTO_UDP,
.num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
- .init = udp_init,
- .exit = udp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __udp_init,
+ .exit_netns = __udp_exit,
.conn_schedule = udp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.app_conn_bind = udp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
- .set_state_timeout = udp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index ab85aedea17e..2a2a8363ca16 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -5,6 +5,18 @@
* high-performance and highly available server based on a
* cluster of servers.
*
+ * Version 1 is capable of handling both version 0 and version 1 messages.
+ * Version 0 is the plain old format.
+ * Note: Version 0 receivers will just drop Version 1 messages.
+ * Version 1 is capable of handling IPv6, persistence data,
+ * time-outs and firewall marks.
+ * In Version 1, "ip_vs_sync_conn_options" is sent in network byte order.
+ * Version 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
+ *
+ * Definitions: a Message is a complete datagram,
+ * a Sync_conn is a part of a Message, and
+ * Param Data is an option to a Sync_conn.
+ *
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
*
* ip_vs_sync: sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
* Alexandre Cassen : Added SyncID support for incoming sync
* messages filtering.
* Justin Ossevoort : Fix endian problem on sync message size.
+ * Hans Schillstrom : Added Version 1: i.e. IPv6,
+ * Persistence support, fwmark and time-out.
*/
#define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
+
#include <net/ip.h>
#include <net/sock.h>
@@ -43,11 +59,13 @@
#define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
#define IP_VS_SYNC_PORT 8848 /* multicast port */
+#define SYNC_PROTO_VER 1 /* Protocol version in header */
/*
* IPVS sync connection entry
+ * Version 0, i.e. original version.
*/
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
__u8 reserved;
/* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@ struct ip_vs_sync_conn_options {
struct ip_vs_seq out_seq; /* outgoing seq. struct */
};
+/*
+ Sync Connection format (sync_conn)
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Type | Protocol | Ver. | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Flags |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | State | cport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | vport | dport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | fwmark |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | timeout (in sec.) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ... |
+ | IP-Addresses (v4 or v6) |
+ | ... |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ Optional Parameters.
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param. Type | Param. Length | Param. data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+ | ... |
+ | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | Param Type | Param. Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param data |
+ | Last Param data should be padded for 32 bit alignment |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ * Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ __be32 caddr; /* client address */
+ __be32 vaddr; /* virtual address */
+ __be32 daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ * Type 2 messages IPv6
+ */
+struct ip_vs_sync_v6 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ struct in6_addr caddr; /* client address */
+ struct in6_addr vaddr; /* virtual address */
+ struct in6_addr daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+ struct ip_vs_sync_v4 v4;
+ struct ip_vs_sync_v6 v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6 0
+#define STYPE_F_INET6 (1 << STYPE_INET6)
+
+#define SVER_SHIFT 12 /* Shift to get version */
+#define SVER_MASK 0x0fff /* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA 1
+#define IPVS_OPT_PE_DATA 2
+#define IPVS_OPT_PE_NAME 3
+#define IPVS_OPT_PARAM 7
+
+#define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
+
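+
The ver_size field carries the 4-bit version in its top bits and the 12-bit size below, per SVER_SHIFT/SVER_MASK above. A hedged sketch of packing and unpacking it; the patch open-codes these operations and the example_* helpers are illustrative only.

/* Illustrative helpers only; not part of the patch. */
static inline __be16 example_pack_ver_size(unsigned int ver, unsigned int size)
{
	return htons(((ver << SVER_SHIFT) & ~SVER_MASK) | (size & SVER_MASK));
}

static inline unsigned int example_sync_conn_size(const union ip_vs_sync_conn *s)
{
	return ntohs(s->v4.ver_size) & SVER_MASK;	/* low 12 bits: size */
}

static inline unsigned int example_sync_conn_ver(const union ip_vs_sync_conn *s)
{
	return ntohs(s->v4.ver_size) >> SVER_SHIFT;	/* high 4 bits: version */
}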
struct ip_vs_sync_thread_data {
+ struct net *net;
struct socket *sock;
char *buf;
};
-#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
#define FULL_CONN_SIZE \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
/*
- The master mulitcasts messages to the backup load balancers in the
- following format.
+ The master multicasts messages (datagrams) to the backup load balancers
+ in the following format.
+
+ Version 1:
+ Note: the first byte must be zero, so version 0 receivers will drop the packet.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Count Conns | SyncID | Size |
+ | 0 | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | Version | Reserved, set to Zero |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
- | . |
+ ~ . ~
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (n) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IPVS Sync Connection (1) |
*/
#define SYNC_MESG_HEADER_LEN 4
#define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
__u8 nr_conns;
__u8 syncid;
__u16 size;
@@ -113,9 +249,16 @@ struct ip_vs_sync_mesg {
/* ip_vs_sync_conn entries start here */
};
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+ __u8 reserved; /* must be zero */
+ __u8 syncid;
+ __u16 size;
+ __u8 nr_conns;
+ __s8 version; /* SYNC_PROTO_VER */
+ __u16 spare;
+ /* ip_vs_sync_conn entries start here */
+};
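
Because the Version 1 header keeps a mandatory-zero reserved byte where Version 0 stores nr_conns, a backup can tell the two formats apart from the first byte, as noted above. A minimal sketch of such a check; it is illustrative only, the receive path in this patch performs its own, fuller validation.

/* Sketch: distinguish v0 and v1 sync datagrams by their first byte.
 * In v1 the first byte is the mandatory-zero 'reserved' field; in v0
 * it is nr_conns, which is non-zero for any message carrying entries. */
static inline bool example_is_sync_v1(const char *buffer)
{
	const struct ip_vs_sync_mesg *m = (const struct ip_vs_sync_mesg *)buffer;

	return m->reserved == 0 && m->version == SYNC_PROTO_VER;
}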
struct ip_vs_sync_buff {
struct list_head list;
@@ -127,28 +270,6 @@ struct ip_vs_sync_buff {
unsigned char *end;
};
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
/* multicast addr */
static struct sockaddr_in mcast_addr = {
.sin_family = AF_INET,
@@ -156,41 +277,71 @@ static struct sockaddr_in mcast_addr = {
.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
};
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+ ho->init_seq = get_unaligned_be32(&no->init_seq);
+ ho->delta = get_unaligned_be32(&no->delta);
+ ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
+
+/*
+ * Copy of struct ip_vs_seq
+ * From Aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+ put_unaligned_be32(ho->init_seq, &no->init_seq);
+ put_unaligned_be32(ho->delta, &no->delta);
+ put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&ip_vs_sync_lock);
- if (list_empty(&ip_vs_sync_queue)) {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (list_empty(&ipvs->sync_queue)) {
sb = NULL;
} else {
- sb = list_entry(ip_vs_sync_queue.next,
+ sb = list_entry(ipvs->sync_queue.next,
struct ip_vs_sync_buff,
list);
list_del(&sb->list);
}
- spin_unlock_bh(&ip_vs_sync_lock);
+ spin_unlock_bh(&ipvs->sync_lock);
return sb;
}
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
- if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
kfree(sb);
return NULL;
}
+ sb->mesg->reserved = 0; /* old nr_conns, i.e. must be zero now */
+ sb->mesg->version = SYNC_PROTO_VER;
+ sb->mesg->syncid = ipvs->master_syncid;
+ sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
sb->mesg->nr_conns = 0;
- sb->mesg->syncid = ip_vs_master_syncid;
- sb->mesg->size = 4;
- sb->head = (unsigned char *)sb->mesg + 4;
- sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+ sb->mesg->spare = 0;
+ sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+ sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
sb->firstuse = jiffies;
return sb;
}
@@ -201,14 +352,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
kfree(sb);
}
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
{
- spin_lock(&ip_vs_sync_lock);
- if (ip_vs_sync_state & IP_VS_STATE_MASTER)
- list_add_tail(&sb->list, &ip_vs_sync_queue);
+ struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+ spin_lock(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&sb->list, &ipvs->sync_queue);
else
ip_vs_sync_buff_release(sb);
- spin_unlock(&ip_vs_sync_lock);
+ spin_unlock(&ipvs->sync_lock);
}
/*
@@ -216,36 +369,101 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
* than the specified time or the specified time is zero.
*/
static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&curr_sb_lock);
- if (curr_sb && (time == 0 ||
- time_before(jiffies - curr_sb->firstuse, time))) {
- sb = curr_sb;
- curr_sb = NULL;
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ if (ipvs->sync_buff && (time == 0 ||
+ time_before(jiffies - ipvs->sync_buff->firstuse, time))) {
+ sb = ipvs->sync_buff;
+ ipvs->sync_buff = NULL;
} else
sb = NULL;
- spin_unlock_bh(&curr_sb_lock);
+ spin_unlock_bh(&ipvs->sync_buff_lock);
return sb;
}
+/*
+ * Switch mode from sending version 0 or 1
+ * - must handle sync_buf
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+ return;
+ if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+ return;
+
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ /* Buffer empty ? then let buf_create do the job */
+ if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
+ kfree(ipvs->sync_buff);
+ ipvs->sync_buff = NULL;
+ } else {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&ipvs->sync_buff->list,
+ &ipvs->sync_queue);
+ else
+ ip_vs_sync_buff_release(ipvs->sync_buff);
+ spin_unlock_bh(&ipvs->sync_lock);
+ }
+ spin_unlock_bh(&ipvs->sync_buff_lock);
+}
/*
+ * Create a new sync buffer for Version 0 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+{
+ struct ip_vs_sync_buff *sb;
+ struct ip_vs_sync_mesg_v0 *mesg;
+
+ if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+ return NULL;
+
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
+ kfree(sb);
+ return NULL;
+ }
+ mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+ mesg->nr_conns = 0;
+ mesg->syncid = ipvs->master_syncid;
+ mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+ sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+ sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+ sb->firstuse = jiffies;
+ return sb;
+}
+
+/*
+ * Version 0, can be switched on via sysctl.
* Add an ip_vs_conn information into the current sync_buff.
- * Called by ip_vs_in.
*/
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
{
- struct ip_vs_sync_mesg *m;
- struct ip_vs_sync_conn *s;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg_v0 *m;
+ struct ip_vs_sync_conn_v0 *s;
int len;
- spin_lock(&curr_sb_lock);
- if (!curr_sb) {
- if (!(curr_sb=ip_vs_sync_buff_create())) {
- spin_unlock(&curr_sb_lock);
+ if (unlikely(cp->af != AF_INET))
+ return;
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ return;
+
+ spin_lock(&ipvs->sync_buff_lock);
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff =
+ ip_vs_sync_buff_create_v0(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
@@ -253,10 +471,11 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
- m = curr_sb->mesg;
- s = (struct ip_vs_sync_conn *)curr_sb->head;
+ m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+ s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
/* copy members */
+ s->reserved = 0;
s->protocol = cp->protocol;
s->cport = cp->cport;
s->vport = cp->vport;
@@ -274,83 +493,366 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
m->nr_conns++;
m->size += len;
- curr_sb->head += len;
+ ipvs->sync_buff->head += len;
/* check if there is a space for next one */
- if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
- sb_queue_tail(curr_sb);
- curr_sb = NULL;
+ if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
}
- spin_unlock(&curr_sb_lock);
+ spin_unlock(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
if (cp->control)
- ip_vs_sync_conn(cp->control);
+ ip_vs_sync_conn(net, cp->control);
+}
+
+/*
+ * Add an ip_vs_conn information into the current sync_buff.
+ * Called by ip_vs_in.
+ * Sending Version 1 messages
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m;
+ union ip_vs_sync_conn *s;
+ __u8 *p;
+ unsigned int len, pe_name_len, pad;
+
+ /* Handle old version of the protocol */
+ if (ipvs->sysctl_sync_ver == 0) {
+ ip_vs_sync_conn_v0(net, cp);
+ return;
+ }
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ goto control;
+sloop:
+ /* Sanity checks */
+ pe_name_len = 0;
+ if (cp->pe_data_len) {
+ if (!cp->pe_data || !cp->dest) {
+ IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+ return;
+ }
+ pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+ }
+
+ spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ len = sizeof(struct ip_vs_sync_v6);
+ else
+#endif
+ len = sizeof(struct ip_vs_sync_v4);
+
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+ len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+ if (cp->pe_data_len)
+ len += cp->pe_data_len + 2; /* + Param hdr field */
+ if (pe_name_len)
+ len += pe_name_len + 2;
+
+ /* check if there is a space for this one */
+ pad = 0;
+ if (ipvs->sync_buff) {
+ pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+ if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
+ pad = 0;
+ }
+ }
+
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
+ pr_err("ip_vs_sync_buff_create failed.\n");
+ return;
+ }
+ }
+
+ m = ipvs->sync_buff->mesg;
+ p = ipvs->sync_buff->head;
+ ipvs->sync_buff->head += pad + len;
+ m->size += pad + len;
+ /* Add padding (if any) after the previous sync_conn */
+ while (pad--)
+ *(p++) = 0;
+
+ s = (union ip_vs_sync_conn *)p;
+
+ /* Set message type & copy members */
+ s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+ s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */
+ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+ s->v4.state = htons(cp->state);
+ s->v4.protocol = cp->protocol;
+ s->v4.cport = cp->cport;
+ s->v4.vport = cp->vport;
+ s->v4.dport = cp->dport;
+ s->v4.fwmark = htonl(cp->fwmark);
+ s->v4.timeout = htonl(cp->timeout / HZ);
+ m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6) {
+ p += sizeof(struct ip_vs_sync_v6);
+ ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+ ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+ ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+ } else
+#endif
+ {
+ p += sizeof(struct ip_vs_sync_v4); /* options ptr */
+ s->v4.caddr = cp->caddr.ip;
+ s->v4.vaddr = cp->vaddr.ip;
+ s->v4.daddr = cp->daddr.ip;
+ }
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+ *(p++) = IPVS_OPT_SEQ_DATA;
+ *(p++) = sizeof(struct ip_vs_sync_conn_options);
+ hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+ p += sizeof(struct ip_vs_seq);
+ hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+ p += sizeof(struct ip_vs_seq);
+ }
+ /* Handle pe data */
+ if (cp->pe_data_len && cp->pe_data) {
+ *(p++) = IPVS_OPT_PE_DATA;
+ *(p++) = cp->pe_data_len;
+ memcpy(p, cp->pe_data, cp->pe_data_len);
+ p += cp->pe_data_len;
+ if (pe_name_len) {
+ /* Add PE_NAME */
+ *(p++) = IPVS_OPT_PE_NAME;
+ *(p++) = pe_name_len;
+ memcpy(p, cp->pe->name, pe_name_len);
+ p += pe_name_len;
+ }
+ }
+
+ spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+ /* synchronize its controller if it has one */
+ cp = cp->control;
+ if (!cp)
+ return;
+ /*
+ * Reduce sync rate for templates
+ * i.e only increment in_pkts for Templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+ int pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+ return;
+ }
+ goto sloop;
}
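
The builder above keeps every Version 1 record 32-bit aligned inside the sync_buff: it computes pad = (4 - (size_t)head) & 3, reserves pad + len bytes, and zero-fills the padding before writing the record. The following is a minimal userspace sketch of that append-with-padding step, not part of the patch; the helper names (pad_to_4, append_record) and the fixed 64-byte buffer are invented for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Padding needed so that "head" sits on the next 4-byte boundary. */
static size_t pad_to_4(const unsigned char *head)
{
	return (4 - (uintptr_t)head) & 3;
}

/* Append one variable-length record, zero-filling the padding first. */
static unsigned char *append_record(unsigned char *head, unsigned char *end,
				    const void *rec, size_t len)
{
	size_t pad = pad_to_4(head);

	if (head + pad + len > end)
		return NULL;	/* no room: the caller queues the buffer and retries */
	memset(head, 0, pad);
	memcpy(head + pad, rec, len);
	return head + pad + len;
}

int main(void)
{
	unsigned char buf[64] __attribute__((aligned(4)));
	unsigned char *head = buf, *end = buf + sizeof(buf);
	const char a[6] = "abcde";	/* 6 bytes, so the next record needs 2 pad bytes */
	const char b[4] = "xyz";

	head = append_record(head, end, a, sizeof(a));
	printf("head at offset %zu\n", (size_t)(head - buf));	/* 6 */
	head = append_record(head, end, b, sizeof(b));
	printf("head at offset %zu\n", (size_t)(head - buf));	/* 6 + 2 pad + 4 = 12 */
	return 0;
}

With a 4-byte-aligned buffer the second record starts at offset 8, so a receiver can always treat the cursor as a record pointer without unaligned accesses.
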
+/*
+ * fill_param used by version 1
+ */
static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
- const union nf_inet_addr *caddr, __be16 cport,
- const union nf_inet_addr *vaddr, __be16 vport,
- struct ip_vs_conn_param *p)
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+ struct ip_vs_conn_param *p,
+ __u8 *pe_data, unsigned int pe_data_len,
+ __u8 *pe_name, unsigned int pe_name_len)
{
- /* XXX: Need to take into account persistence engine */
- ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+ (const union nf_inet_addr *)&sc->v6.caddr,
+ sc->v6.cport,
+ (const union nf_inet_addr *)&sc->v6.vaddr,
+ sc->v6.vport, p);
+ else
+#endif
+ ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+ (const union nf_inet_addr *)&sc->v4.caddr,
+ sc->v4.cport,
+ (const union nf_inet_addr *)&sc->v4.vaddr,
+ sc->v4.vport, p);
+ /* Handle pe data */
+ if (pe_data_len) {
+ if (pe_name_len) {
+ char buff[IP_VS_PENAME_MAXLEN+1];
+
+ memcpy(buff, pe_name, pe_name_len);
+ buff[pe_name_len] = 0;
+ p->pe = __ip_vs_pe_getbyname(buff);
+ if (!p->pe) {
+ IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+ buff);
+ return 1;
+ }
+ } else {
+ IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+ return 1;
+ }
+
+ p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+ if (!p->pe_data) {
+ if (p->pe->module)
+ module_put(p->pe->module);
+ return -ENOMEM;
+ }
+ memcpy(p->pe_data, pe_data, pe_data_len);
+ p->pe_data_len = pe_data_len;
+ }
return 0;
}
/*
- * Process received multicast message and create the corresponding
- * ip_vs_conn entries.
+ * Connection Add / Update.
+ * Common for version 0 and 1 reception of backup sync_conns.
+ * Param: ...
+ * timeout is in sec.
*/
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+ unsigned int flags, unsigned int state,
+ unsigned int protocol, unsigned int type,
+ const union nf_inet_addr *daddr, __be16 dport,
+ unsigned long timeout, __u32 fwmark,
+ struct ip_vs_sync_conn_options *opt)
{
- struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
- struct ip_vs_sync_conn *s;
- struct ip_vs_sync_conn_options *opt;
- struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp;
struct ip_vs_dest *dest;
- struct ip_vs_conn_param param;
- char *p;
- int i;
+ struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- if (buflen < sizeof(struct ip_vs_sync_mesg)) {
- IP_VS_ERR_RL("sync message header too short\n");
- return;
- }
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ cp = ip_vs_conn_in_get(param);
+ else
+ cp = ip_vs_ct_in_get(param);
- /* Convert size back to host byte order */
- m->size = ntohs(m->size);
+ if (cp && param->pe_data) /* Free pe_data */
+ kfree(param->pe_data);
+ if (!cp) {
+ /*
+ * Find the appropriate destination for the connection.
+ * If it is not found the connection will remain unbound
+ * but still handled.
+ */
+ dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+ param->vport, protocol, fwmark);
- if (buflen != m->size) {
- IP_VS_ERR_RL("bogus sync message size\n");
- return;
+ /* Set the appropriate activity flag */
+ if (protocol == IPPROTO_TCP) {
+ if (state != IP_VS_TCP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ } else if (protocol == IPPROTO_SCTP) {
+ if (state != IP_VS_SCTP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ if (!cp) {
+ if (param->pe_data)
+ kfree(param->pe_data);
+ IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+ return;
+ }
+ } else if (!cp->dest) {
+ dest = ip_vs_try_bind_dest(cp);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+ (cp->state != state)) {
+ /* update active/inactive flag for the connection */
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags |= IP_VS_CONN_F_INACTIVE;
+ } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state == IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_inc(&dest->activeconns);
+ atomic_dec(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+ (cp->state != state)) {
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
}
- /* SyncID sanity check */
- if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
- IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
- m->syncid);
- return;
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+ atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+ * For Version 0 messages:
+ * - it is not possible to recover the right timeout for templates
+ * - we cannot find the right fwmark virtual service. If needed,
+ *   we can do it for non-fwmark persistent services.
+ * Version 1 messages carry the timeout, so there is no problem.
+ */
+ if (timeout) {
+ if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+ timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+ cp->timeout = timeout*HZ;
+ } else {
+ struct ip_vs_proto_data *pd;
+
+ pd = ip_vs_proto_data_get(net, protocol);
+ if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+ cp->timeout = pd->timeout_table[state];
+ else
+ cp->timeout = (3*60*HZ);
}
+ ip_vs_conn_put(cp);
+}
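
ip_vs_proc_conn() above takes the synced timeout in seconds, clamps it against MAX_SCHEDULE_TIMEOUT / HZ and only then multiplies by HZ, so an oversized value from the wire cannot overflow the timer; when no timeout was carried it falls back to the protocol timeout table or a three-minute default. A hedged userspace sketch of that clamp-then-scale step follows; FAKE_HZ, FAKE_MAX_TIMEOUT and the simplified fallback are stand-ins invented here, not kernel values.

#include <stdio.h>

#define FAKE_HZ			250UL		/* stand-in for the kernel HZ */
#define FAKE_MAX_TIMEOUT	(~0UL >> 1)	/* stand-in for MAX_SCHEDULE_TIMEOUT */
#define DEFAULT_TIMEOUT_SEC	(3 * 60)	/* fallback when nothing was synced */

/* Convert a timeout in seconds to "jiffies", clamping untrusted input first. */
static unsigned long timeout_to_jiffies(unsigned long seconds)
{
	if (!seconds)
		seconds = DEFAULT_TIMEOUT_SEC;
	if (seconds > FAKE_MAX_TIMEOUT / FAKE_HZ)
		seconds = FAKE_MAX_TIMEOUT / FAKE_HZ;
	return seconds * FAKE_HZ;
}

int main(void)
{
	printf("%lu\n", timeout_to_jiffies(30));	/* 7500 */
	printf("%lu\n", timeout_to_jiffies(0));		/* 45000: the 180 s fallback */
	printf("%lu\n", timeout_to_jiffies(~0UL));	/* clamped, no overflow */
	return 0;
}
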
- p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+/*
+ * Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+ const size_t buflen)
+{
+ struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+ struct ip_vs_sync_conn_v0 *s;
+ struct ip_vs_sync_conn_options *opt;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ char *p;
+ int i;
+
+ p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
for (i=0; i<m->nr_conns; i++) {
unsigned flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
return;
}
- s = (struct ip_vs_sync_conn *) p;
+ s = (struct ip_vs_sync_conn_v0 *) p;
flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
flags &= ~IP_VS_CONN_F_HASHED;
if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
p += FULL_CONN_SIZE;
if (p > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn options in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, Dropping buffer, bogus conn options\n");
return;
}
} else {
@@ -362,118 +864,286 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
pp = ip_vs_proto_get(s->protocol);
if (!pp) {
- IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
s->protocol);
continue;
}
if (state >= pp->num_states) {
- IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
pp->name, state);
continue;
}
} else {
/* protocol in templates is not used for state/timeout */
- pp = NULL;
if (state > 0) {
- IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
state);
state = 0;
}
}
- {
- if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
- (union nf_inet_addr *)&s->caddr,
- s->cport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport, &param)) {
- pr_err("ip_vs_conn_fill_param_sync failed");
- return;
+ ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+ (const union nf_inet_addr *)&s->caddr,
+ s->cport,
+ (const union nf_inet_addr *)&s->vaddr,
+ s->vport, &param);
+
+ /* Send timeout as Zero */
+ ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+ (union nf_inet_addr *)&s->daddr, s->dport,
+ 0, 0, opt);
+ }
+}
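
The Version 0 receive path above walks a run of fixed-size connection records, advancing by FULL_CONN_SIZE when sequence options are attached and by SIMPLE_CONN_SIZE otherwise, and bails out as soon as a record would cross the end of the datagram. The sketch below mirrors that bounds-checked walk in plain userspace C; the record layout, sizes and flag value are made up for the illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical fixed-size layouts, loosely modelled on the v0 records. */
struct rec_hdr  { uint16_t flags; uint8_t body[14]; };	/* "simple" record  */
struct rec_opts { uint8_t opt[8]; };			/* optional trailer */

#define F_HAS_OPTS 0x1

/* Walk nr records packed back to back, refusing to read past buf + len. */
static int walk_records(const uint8_t *buf, size_t len, unsigned int nr)
{
	const uint8_t *p = buf, *end = buf + len;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		const struct rec_hdr *r;

		if (p + sizeof(*r) > end)
			return -1;		/* truncated record */
		r = (const struct rec_hdr *)p;
		p += sizeof(*r);
		if (r->flags & F_HAS_OPTS) {
			if (p + sizeof(struct rec_opts) > end)
				return -1;	/* truncated options */
			p += sizeof(struct rec_opts);
		}
		printf("record %u ok\n", i);
	}
	return 0;
}

int main(void)
{
	uint8_t msg[2 * sizeof(struct rec_hdr)] = { 0 };

	return walk_records(msg, sizeof(msg), 2) ? 1 : 0;
}
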
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+ __u32 *opt_flags,
+ struct ip_vs_sync_conn_options *opt)
+{
+ struct ip_vs_sync_conn_options *topt;
+
+ topt = (struct ip_vs_sync_conn_options *)p;
+
+ if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+ IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+ return -EINVAL;
+ }
+ if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+ IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+ return -EINVAL;
+ }
+ ntoh_seq(&topt->in_seq, &opt->in_seq);
+ ntoh_seq(&topt->out_seq, &opt->out_seq);
+ *opt_flags |= IPVS_OPT_F_SEQ_DATA;
+ return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+ __u8 **data, unsigned int maxlen,
+ __u32 *opt_flags, __u32 flag)
+{
+ if (plen > maxlen) {
+ IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+ return -EINVAL;
+ }
+ if (*opt_flags & flag) {
+ IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+ return -EINVAL;
+ }
+ *data_len = plen;
+ *data = p;
+ *opt_flags |= flag;
+ return 0;
+}
+/*
+ * Process a Version 1 sync. connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+ struct ip_vs_sync_conn_options opt;
+ union ip_vs_sync_conn *s;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ __u32 flags;
+ unsigned int af, state, pe_data_len = 0, pe_name_len = 0;
+ __u8 *pe_data = NULL, *pe_name = NULL;
+ __u32 opt_flags = 0;
+ int retc = 0;
+
+ s = (union ip_vs_sync_conn *) p;
+
+ if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+ af = AF_INET6;
+ p += sizeof(struct ip_vs_sync_v6);
+#else
+ IP_VS_DBG(3, "BACKUP, IPv6 msg received, but IPVS is not compiled for IPv6\n");
+ retc = 10;
+ goto out;
+#endif
+ } else if (!s->v4.type) {
+ af = AF_INET;
+ p += sizeof(struct ip_vs_sync_v4);
+ } else {
+ return -10;
+ }
+ if (p > msg_end)
+ return -20;
+
+ /* Process optional params check Type & Len. */
+ while (p < msg_end) {
+ int ptype;
+ int plen;
+
+ if (p+2 > msg_end)
+ return -30;
+ ptype = *(p++);
+ plen = *(p++);
+
+ if (!plen || ((p + plen) > msg_end))
+ return -40;
+ /* Handle seq option p = param data */
+ switch (ptype & ~IPVS_OPT_F_PARAM) {
+ case IPVS_OPT_SEQ_DATA:
+ if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+ return -50;
+ break;
+
+ case IPVS_OPT_PE_DATA:
+ if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+ IP_VS_PEDATA_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_DATA))
+ return -60;
+ break;
+
+ case IPVS_OPT_PE_NAME:
+ if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name,
+ IP_VS_PENAME_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_NAME))
+ return -70;
+ break;
+
+ default:
+ /* Param data mandatory ? */
+ if (!(ptype & IPVS_OPT_F_PARAM)) {
+ IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+ ptype & ~IPVS_OPT_F_PARAM);
+ retc = 20;
+ goto out;
}
- if (!(flags & IP_VS_CONN_F_TEMPLATE))
- cp = ip_vs_conn_in_get(&param);
- else
- cp = ip_vs_ct_in_get(&param);
}
- if (!cp) {
- /*
- * Find the appropriate destination for the connection.
- * If it is not found the connection will remain unbound
- * but still handled.
- */
- dest = ip_vs_find_dest(AF_INET,
- (union nf_inet_addr *)&s->daddr,
- s->dport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport,
- s->protocol);
- /* Set the approprite ativity flag */
- if (s->protocol == IPPROTO_TCP) {
- if (state != IP_VS_TCP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
- } else if (s->protocol == IPPROTO_SCTP) {
- if (state != IP_VS_SCTP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
+ p += plen; /* Next option */
+ }
+
+ /* Get flags and Mask off unsupported */
+ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+ flags |= IP_VS_CONN_F_SYNC;
+ state = ntohs(s->v4.state);
+
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+ pp = ip_vs_proto_get(s->v4.protocol);
+ if (!pp) {
+ IP_VS_DBG(3, "BACKUP, Unsupported protocol %u\n",
+ s->v4.protocol);
+ retc = 30;
+ goto out;
+ }
+ if (state >= pp->num_states) {
+ IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+ pp->name, state);
+ retc = 40;
+ goto out;
+ }
+ } else {
+ /* protocol in templates is not used for state/timeout */
+ if (state > 0) {
+ IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+ state);
+ state = 0;
+ }
+ }
+ if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+ pe_data_len, pe_name, pe_name_len)) {
+ retc = 50;
+ goto out;
+ }
+ /* If compiled for IPv4 only, just silently skip IPv6 entries */
+ if (af == AF_INET)
+ ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+ (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+ ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#ifdef CONFIG_IP_VS_IPV6
+ else
+ ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+ (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+ ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#endif
+ return 0;
+ /* Error exit */
+out:
+ IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+ return retc;
+
+}
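
Version 1 sub-messages end with a run of one-byte-type / one-byte-length options; ip_vs_proc_sync_conn() rejects a zero length or one that runs past msg_end, and treats an unknown option as fatal for that sub-message only when it lacks the skippable IPVS_OPT_F_PARAM bit. A small userspace sketch of the same walk, with hypothetical option codes (OPT_SEQ, OPT_NAME, OPT_SKIPPABLE) standing in for the real IPVS_OPT_* values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical option types; the high bit marks an option as skippable. */
#define OPT_SKIPPABLE	0x80
#define OPT_SEQ		1
#define OPT_NAME	2

/* Walk type/length options between p and end; negative on malformed input. */
static int parse_options(const uint8_t *p, const uint8_t *end)
{
	while (p < end) {
		uint8_t type, len;

		if (p + 2 > end)
			return -1;		/* truncated type/len header */
		type = *p++;
		len  = *p++;
		if (!len || p + len > end)
			return -2;		/* bogus length */

		switch (type & ~OPT_SKIPPABLE) {
		case OPT_SEQ:
			printf("seq option, %d bytes\n", len);
			break;
		case OPT_NAME:
			printf("name option, %d bytes\n", len);
			break;
		default:
			if (!(type & OPT_SKIPPABLE))
				return -3;	/* unknown mandatory option */
			break;			/* unknown but skippable */
		}
		p += len;			/* next option */
	}
	return 0;
}

int main(void)
{
	const uint8_t msg[] = { OPT_SEQ, 4, 1, 2, 3, 4,
				OPT_SKIPPABLE | 0x7f, 2, 0, 0 };

	return parse_options(msg, msg + sizeof(msg)) ? 1 : 0;
}
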
+/*
+ * Process received multicast message and create the corresponding
+ * ip_vs_conn entries.
+ * Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+ const size_t buflen)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+ __u8 *p, *msg_end;
+ int i, nr_conns;
+
+ if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+ IP_VS_DBG(2, "BACKUP, message header too short\n");
+ return;
+ }
+ /* Convert size back to host byte order */
+ m2->size = ntohs(m2->size);
+
+ if (buflen != m2->size) {
+ IP_VS_DBG(2, "BACKUP, bogus message size\n");
+ return;
+ }
+ /* SyncID sanity check */
+ if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+ IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+ return;
+ }
+ /* Handle version 1 message */
+ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+ && (m2->spare == 0)) {
+
+ msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+ nr_conns = m2->nr_conns;
+
+ for (i=0; i<nr_conns; i++) {
+ union ip_vs_sync_conn *s;
+ unsigned size;
+ int retc;
+
+ p = msg_end;
+ if (p + sizeof(s->v4) > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
+ return;
}
- cp = ip_vs_conn_new(&param,
- (union nf_inet_addr *)&s->daddr,
- s->dport, flags, dest);
- if (dest)
- atomic_dec(&dest->refcnt);
- if (!cp) {
- pr_err("ip_vs_conn_new failed\n");
+ s = (union ip_vs_sync_conn *)p;
+ size = ntohs(s->v4.ver_size) & SVER_MASK;
+ msg_end = p + size;
+ /* Basic sanity checks */
+ if (msg_end > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
return;
}
- } else if (!cp->dest) {
- dest = ip_vs_try_bind_dest(cp);
- if (dest)
- atomic_dec(&dest->refcnt);
- } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
- (cp->state != state)) {
- /* update active/inactive flag for the connection */
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_TCP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags |= IP_VS_CONN_F_INACTIVE;
- } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state == IP_VS_TCP_S_ESTABLISHED)) {
- atomic_inc(&dest->activeconns);
- atomic_dec(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+ ntohs(s->v4.ver_size) >> SVER_SHIFT);
+ return;
}
- } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
- (cp->state != state)) {
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_SCTP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ /* Process a single sync_conn */
+ retc = ip_vs_proc_sync_conn(net, p, msg_end);
+ if (retc < 0) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+ retc);
+ return;
}
+ /* Make sure we have 32 bit alignment */
+ msg_end = p + ((size + 3) & ~3);
}
-
- if (opt)
- memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
- cp->state = state;
- cp->old_state = cp->state;
- /*
- * We can not recover the right timeout for templates
- * in all cases, we can not find the right fwmark
- * virtual service. If needed, we can do it for
- * non-fwmark persistent services.
- */
- if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
- cp->timeout = pp->timeout_table[state];
- else
- cp->timeout = (3*60*HZ);
- ip_vs_conn_put(cp);
+ } else {
+ /* Old type of message */
+ ip_vs_process_message_v0(net, buffer, buflen);
+ return;
}
}
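
ip_vs_process_message() above tells the two formats apart by the header (version equal to SYNC_PROTO_VER with the reserved and spare fields zero means Version 1) and then steps through Version 1 sub-messages using the 16-bit ver_size field: the top bits carry the version, the low bits the size, and the cursor is rounded up to the next 32-bit boundary. A tiny sketch of that field split and rounding, assuming a 4/12-bit split (SVER_SHIFT of 12); that matches the arithmetic here, but the real constants are defined outside this hunk.

#include <stdint.h>
#include <stdio.h>

#define VER_SHIFT 12		/* assumed value of SVER_SHIFT */
#define SIZE_MASK 0x0fff	/* assumed value of SVER_MASK  */

static unsigned int ver_of(uint16_t ver_size)  { return ver_size >> VER_SHIFT; }
static unsigned int size_of(uint16_t ver_size) { return ver_size & SIZE_MASK; }

/* Round a sub-message size up to the next 32-bit boundary. */
static unsigned int aligned_size(unsigned int size) { return (size + 3) & ~3u; }

int main(void)
{
	uint16_t ver_size = (0 << VER_SHIFT) | 58;	/* expected version bits are 0 */

	printf("version %u, size %u, cursor advances %u bytes\n",
	       ver_of(ver_size), size_of(ver_size),
	       aligned_size(size_of(ver_size)));	/* version 0, size 58, 60 bytes */
	return 0;
}
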
@@ -511,8 +1181,10 @@ static int set_mcast_if(struct sock *sk, char *ifname)
{
struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
+ struct net *net = sock_net(sk);
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1203,33 @@ static int set_mcast_if(struct sock *sk, char *ifname)
* Set the maximum length of sync message according to the
* specified interface's MTU.
*/
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct net_device *dev;
int num;
if (sync_state == IP_VS_STATE_MASTER) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+ if (!dev)
return -ENODEV;
num = (dev->mtu - sizeof(struct iphdr) -
sizeof(struct udphdr) -
SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
- sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+ ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
IP_VS_DBG(7, "setting the maximum length of sync sending "
- "message %d.\n", sync_send_mesg_maxlen);
+ "message %d.\n", ipvs->send_mesg_maxlen);
} else if (sync_state == IP_VS_STATE_BACKUP) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+ if (!dev)
return -ENODEV;
- sync_recv_mesg_maxlen = dev->mtu -
+ ipvs->recv_mesg_maxlen = dev->mtu -
sizeof(struct iphdr) - sizeof(struct udphdr);
IP_VS_DBG(7, "setting the maximum length of sync receiving "
- "message %d.\n", sync_recv_mesg_maxlen);
+ "message %d.\n", ipvs->recv_mesg_maxlen);
}
return 0;
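
For the master state, set_sync_mesg_maxlen() sizes a sync datagram so it always fits the interface MTU: it subtracts the IP and UDP headers, the sync header and 20 bytes of slack, divides the remainder by the per-connection entry size, and caps the count at the per-buffer maximum. The userspace sketch below redoes that arithmetic with placeholder constants; the real sizes come from the kernel structures and are not reproduced here.

#include <stdio.h>

/* Illustrative sizes only; the real values come from the kernel headers. */
#define IP_HDR_LEN		20
#define UDP_HDR_LEN		8
#define SYNC_HDR_LEN		4
#define CONN_ENTRY_LEN		24
#define MAX_CONNS_PER_BUFF	255
#define SLACK			20	/* extra headroom kept by the code above */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Largest sync message that still fits one datagram on this MTU. */
static int max_sync_msg_len(int mtu)
{
	int num = (mtu - IP_HDR_LEN - UDP_HDR_LEN - SYNC_HDR_LEN - SLACK) /
		  CONN_ENTRY_LEN;

	return SYNC_HDR_LEN + CONN_ENTRY_LEN * min_int(num, MAX_CONNS_PER_BUFF);
}

int main(void)
{
	/* 1444 with these placeholder sizes: 60 entries of 24 bytes plus the header */
	printf("mtu 1500 -> max sync message %d bytes\n", max_sync_msg_len(1500));
	return 0;
}
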
@@ -569,6 +1244,7 @@ static int set_sync_mesg_maxlen(int sync_state)
static int
join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
{
+ struct net *net = sock_net(sk);
struct ip_mreqn mreq;
struct net_device *dev;
int ret;
@@ -576,7 +1252,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -593,11 +1270,13 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
static int bind_mcastif_addr(struct socket *sock, char *ifname)
{
+ struct net *net = sock_net(sock->sk);
struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1298,20 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
/*
* Set up sending multicast socket over UDP
*/
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+ result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -640,7 +1320,7 @@ static struct socket * make_send_sock(void)
set_mcast_loop(sock->sk, 0);
set_mcast_ttl(sock->sk, 1);
- result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+ result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error binding address of the mcast interface\n");
goto error;
@@ -664,13 +1344,14 @@ static struct socket * make_send_sock(void)
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
@@ -689,7 +1370,7 @@ static struct socket * make_receive_sock(void)
/* join the multicast group */
result = join_mcast_group(sock->sk,
(struct in_addr *) &mcast_addr.sin_addr,
- ip_vs_backup_mcast_ifn);
+ ipvs->backup_mcast_ifn);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
@@ -760,20 +1441,21 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
static int sync_thread_master(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
struct ip_vs_sync_buff *sb;
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+ ipvs->master_mcast_ifn, ipvs->master_syncid);
while (!kthread_should_stop()) {
- while ((sb = sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs))) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
}
- /* check if entries stay in curr_sb for 2 seconds */
- sb = get_curr_sync_buff(2 * HZ);
+ /* check if entries stay in ipvs->sync_buff for 2 seconds */
+ sb = get_curr_sync_buff(ipvs, 2 * HZ);
if (sb) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
@@ -783,14 +1465,13 @@ static int sync_thread_master(void *data)
}
/* clean up the sync_buff queue */
- while ((sb=sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs)))
ip_vs_sync_buff_release(sb);
- }
/* clean up the current sync_buff */
- if ((sb = get_curr_sync_buff(0))) {
+ sb = get_curr_sync_buff(ipvs, 0);
+ if (sb)
ip_vs_sync_buff_release(sb);
- }
/* release the sending multicast socket */
sock_release(tinfo->sock);
@@ -803,11 +1484,12 @@ static int sync_thread_master(void *data)
static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+ ipvs->backup_mcast_ifn, ipvs->backup_syncid);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1499,7 @@ static int sync_thread_backup(void *data)
/* do we have data now? */
while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
- sync_recv_mesg_maxlen);
+ ipvs->recv_mesg_maxlen);
if (len <= 0) {
pr_err("receiving message error\n");
break;
@@ -826,7 +1508,7 @@ static int sync_thread_backup(void *data)
/* disable bottom half, because it accesses the data
shared by softirq while getting/creating conns */
local_bh_disable();
- ip_vs_process_message(tinfo->buf, len);
+ ip_vs_process_message(tinfo->net, tinfo->buf, len);
local_bh_enable();
}
}
@@ -840,41 +1522,42 @@ static int sync_thread_backup(void *data)
}
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
{
struct ip_vs_sync_thread_data *tinfo;
struct task_struct **realtask, *task;
struct socket *sock;
+ struct netns_ipvs *ipvs = net_ipvs(net);
char *name, *buf = NULL;
int (*threadfn)(void *data);
int result = -ENOMEM;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
- sizeof(struct ip_vs_sync_conn));
+ sizeof(struct ip_vs_sync_conn_v0));
if (state == IP_VS_STATE_MASTER) {
- if (sync_master_thread)
+ if (ipvs->master_thread)
return -EEXIST;
- strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_master_mcast_ifn));
- ip_vs_master_syncid = syncid;
- realtask = &sync_master_thread;
- name = "ipvs_syncmaster";
+ strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->master_mcast_ifn));
+ ipvs->master_syncid = syncid;
+ realtask = &ipvs->master_thread;
+ name = "ipvs_master:%d";
threadfn = sync_thread_master;
- sock = make_send_sock();
+ sock = make_send_sock(net);
} else if (state == IP_VS_STATE_BACKUP) {
- if (sync_backup_thread)
+ if (ipvs->backup_thread)
return -EEXIST;
- strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_backup_mcast_ifn));
- ip_vs_backup_syncid = syncid;
- realtask = &sync_backup_thread;
- name = "ipvs_syncbackup";
+ strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->backup_mcast_ifn));
+ ipvs->backup_syncid = syncid;
+ realtask = &ipvs->backup_thread;
+ name = "ipvs_backup:%d";
threadfn = sync_thread_backup;
- sock = make_receive_sock();
+ sock = make_receive_sock(net);
} else {
return -EINVAL;
}
@@ -884,9 +1567,9 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
goto out;
}
- set_sync_mesg_maxlen(state);
+ set_sync_mesg_maxlen(net, state);
if (state == IP_VS_STATE_BACKUP) {
- buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+ buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
if (!buf)
goto outsocket;
}
@@ -895,10 +1578,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
if (!tinfo)
goto outbuf;
+ tinfo->net = net;
tinfo->sock = sock;
tinfo->buf = buf;
- task = kthread_run(threadfn, tinfo, name);
+ task = kthread_run(threadfn, tinfo, name, ipvs->gen);
if (IS_ERR(task)) {
result = PTR_ERR(task);
goto outtinfo;
@@ -906,7 +1590,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
/* mark as active */
*realtask = task;
- ip_vs_sync_state |= state;
+ ipvs->sync_state |= state;
/* increase the module use count */
ip_vs_use_count_inc();
@@ -924,16 +1608,18 @@ out:
}
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
if (state == IP_VS_STATE_MASTER) {
- if (!sync_master_thread)
+ if (!ipvs->master_thread)
return -ESRCH;
pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(sync_master_thread));
+ task_pid_nr(ipvs->master_thread));
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1627,21 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
- spin_lock_bh(&ip_vs_sync_lock);
- ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock_bh(&ip_vs_sync_lock);
- kthread_stop(sync_master_thread);
- sync_master_thread = NULL;
+ spin_lock_bh(&ipvs->sync_lock);
+ ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+ spin_unlock_bh(&ipvs->sync_lock);
+ kthread_stop(ipvs->master_thread);
+ ipvs->master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!sync_backup_thread)
+ if (!ipvs->backup_thread)
return -ESRCH;
pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(sync_backup_thread));
+ task_pid_nr(ipvs->backup_thread));
- ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
- kthread_stop(sync_backup_thread);
- sync_backup_thread = NULL;
+ ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+ kthread_stop(ipvs->backup_thread);
+ ipvs->backup_thread = NULL;
} else {
return -EINVAL;
}
@@ -965,3 +1651,42 @@ int stop_sync_thread(int state)
return 0;
}
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->sync_queue);
+ spin_lock_init(&ipvs->sync_lock);
+ spin_lock_init(&ipvs->sync_buff_lock);
+
+ ipvs->sync_mcast_addr.sin_family = AF_INET;
+ ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+ ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+ return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+ stop_sync_thread(net, IP_VS_STATE_MASTER);
+ stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+ .init = __ip_vs_sync_init,
+ .exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+ return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+ unregister_pernet_subsys(&ipvs_sync_ops);
+}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 5325a3fbe4ac..1f2a4e35fb11 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -175,7 +175,6 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
.fl4_tos = RT_TOS(iph->tos),
.mark = skb->mark,
};
- struct rtable *rt;
if (ip_route_output_key(net, &rt, &fl))
return 0;
@@ -390,7 +389,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -443,7 +443,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -543,7 +543,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
"ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +659,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -773,8 +774,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
df |= (old_iph->frag_off & htons(IP_DF));
- if ((old_iph->frag_off & htons(IP_DF))
- && mtu < ntohs(old_iph->tot_len)) {
+ if ((old_iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -886,7 +887,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
- if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+ if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+ !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -991,7 +993,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+ if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1158,7 +1161,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -1272,7 +1276,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
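
Every hunk in the ip_vs_xmit.c changes above adds the same condition: an ICMP "fragmentation needed" (or IPv6 "packet too big") error is only generated when the packet exceeds the path MTU, the IPv4 DF bit is set, and the skb is not GSO, since a GSO super-packet is segmented to MTU-sized frames further down the stack. A minimal sketch of that predicate, outside any kernel context:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide whether a too-big IPv4 packet must be bounced back to the sender:
 * it is larger than the path MTU, carries the DF bit, and is not a GSO
 * super-packet (which will be segmented later anyway).
 */
static bool frag_needed(unsigned int pkt_len, unsigned int mtu,
			bool df_set, bool is_gso)
{
	return pkt_len > mtu && df_set && !is_gso;
}

int main(void)
{
	printf("%d\n", frag_needed(4000, 1500, true, false));	/* 1: bounce   */
	printf("%d\n", frag_needed(4000, 1500, true, true));	/* 0: GSO skb  */
	printf("%d\n", frag_needed(1400, 1500, true, false));	/* 0: fits MTU */
	return 0;
}
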
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644
index 000000000000..4e99cca61612
--- /dev/null
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -0,0 +1,82 @@
+/*
+ * broadcast connection tracking helper
+ *
+ * (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout)
+{
+ struct nf_conntrack_expect *exp;
+ struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt = skb_rtable(skb);
+ struct in_device *in_dev;
+ struct nf_conn_help *help = nfct_help(ct);
+ __be32 mask = 0;
+
+ /* we're only interested in locally generated packets */
+ if (skb->sk == NULL)
+ goto out;
+ if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+ goto out;
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ goto out;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
+ if (in_dev != NULL) {
+ for_primary_ifa(in_dev) {
+ if (ifa->ifa_broadcast == iph->daddr) {
+ mask = ifa->ifa_mask;
+ break;
+ }
+ } endfor_ifa(in_dev);
+ }
+ rcu_read_unlock();
+
+ if (mask == 0)
+ goto out;
+
+ exp = nf_ct_expect_alloc(ct);
+ if (exp == NULL)
+ goto out;
+
+ exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+ exp->mask.src.u3.ip = mask;
+ exp->mask.src.u.udp.port = htons(0xFFFF);
+
+ exp->expectfn = NULL;
+ exp->flags = NF_CT_EXPECT_PERMANENT;
+ exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
+ exp->helper = NULL;
+
+ nf_ct_expect_related(exp);
+ nf_ct_expect_put(exp);
+
+ nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+ return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
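
nf_conntrack_broadcast_help() only sets up an expectation for locally generated broadcasts: it scans the output device's primary addresses for one whose broadcast address equals the packet's destination, then uses that address's netmask to wildcard the expected peer. A small userspace sketch of the underlying address math (the directed broadcast is the address with all host bits set), using illustrative addresses rather than anything from the patch:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Directed broadcast of a configured address: host bits set to all ones. */
static uint32_t broadcast_of(uint32_t addr_be, uint32_t mask_be)
{
	return addr_be | ~mask_be;
}

int main(void)
{
	uint32_t addr = inet_addr("192.168.1.10");
	uint32_t mask = inet_addr("255.255.255.0");
	uint32_t dst  = inet_addr("192.168.1.255");
	struct in_addr bcast = { .s_addr = broadcast_of(addr, mask) };

	printf("broadcast %s, matches destination: %d\n",
	       inet_ntoa(bcast), broadcast_of(addr, mask) == dst);
	return 0;
}
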
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e61511929c66..1909311c392a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -43,6 +43,7 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -282,6 +283,11 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
static void death_by_timeout(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
if (!test_bit(IPS_DYING_BIT, &ct->status) &&
unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
@@ -419,6 +425,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
+ struct nf_conn_tstamp *tstamp;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -486,8 +493,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
ct->timeout.expires += jiffies;
add_timer(&ct->timeout);
atomic_inc(&ct->ct_general.use);
- set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ ct->status |= IPS_CONFIRMED;
+
+ /* set conntrack timestamp, if enabled. */
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ tstamp->start = ktime_to_ns(skb->tstamp);
+ }
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
* and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
*/
memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
- sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+ offsetof(struct nf_conn, proto) -
+ offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -1185,6 +1202,11 @@ struct __nf_ct_flush_report {
static int kill_report(struct nf_conn *i, void *data)
{
struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(i);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
/* If we fail to deliver the event, death_by_timeout() will retry */
if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1201,9 +1223,9 @@ static int kill_all(struct nf_conn *i, void *data)
return 1;
}
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
{
- if (vmalloced)
+ if (is_vmalloc_addr(hash))
vfree(hash);
else
free_pages((unsigned long)hash,
@@ -1270,8 +1292,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
goto i_see_dead_people;
}
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
@@ -1300,21 +1321,18 @@ void nf_conntrack_cleanup(struct net *net)
}
}
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
size_t sz;
- *vmalloced = 0;
-
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
if (!hash) {
- *vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL);
@@ -1330,7 +1348,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
- int i, bucket, vmalloced, old_vmalloced;
+ int i, bucket;
unsigned int hashsize, old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
@@ -1347,7 +1365,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hashsize)
return -EINVAL;
- hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+ hash = nf_ct_alloc_hashtable(&hashsize, 1);
if (!hash)
return -ENOMEM;
@@ -1369,15 +1387,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
}
}
old_size = init_net.ct.htable_size;
- old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
- init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
spin_unlock_bh(&nf_conntrack_lock);
- nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+ nf_ct_free_hashtable(old_hash, old_size);
return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1490,8 +1506,7 @@ static int nf_conntrack_init_net(struct net *net)
}
net->ct.htable_size = nf_conntrack_htable_size;
- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
- &net->ct.hash_vmalloc, 1);
+ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1503,6 +1518,9 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_acct_init(net);
if (ret < 0)
goto err_acct;
+ ret = nf_conntrack_tstamp_init(net);
+ if (ret < 0)
+ goto err_tstamp;
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
@@ -1510,12 +1528,13 @@ static int nf_conntrack_init_net(struct net *net)
return 0;
err_ecache:
+ nf_conntrack_tstamp_fini(net);
+err_tstamp:
nf_conntrack_acct_fini(net);
err_acct:
nf_conntrack_expect_fini(net);
err_expect:
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index a20fb0bd1efe..cd1e8e0970f2 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -319,7 +319,8 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
const struct nf_conntrack_expect_policy *p;
unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
- atomic_inc(&exp->use);
+ /* two references : one for hash insert, one for the timer */
+ atomic_add(2, &exp->use);
if (master_help) {
hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
(unsigned long)exp);
if (master_help) {
- p = &master_help->helper->expect_policy[exp->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[exp->class];
exp->timeout.expires = jiffies + p->timeout * HZ;
}
add_timer(&exp->timeout);
- atomic_inc(&exp->use);
NF_CT_STAT_INC(net, expect_create);
}
@@ -369,7 +372,10 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
if (!del_timer(&i->timeout))
return 0;
- p = &master_help->helper->expect_policy[i->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[i->class];
i->timeout.expires = jiffies + p->timeout * HZ;
add_timer(&i->timeout);
return 1;
@@ -407,7 +413,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
/* Will be over limit? */
if (master_help) {
- p = &master_help->helper->expect_policy[expect->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[expect->class];
if (p->max_expected &&
master_help->expecting[expect->class] >= p->max_expected) {
evict_oldest_expect(master, expect);
@@ -478,7 +487,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -491,11 +500,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
@@ -630,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
}
net->ct.expect_count = 0;
- net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
- &net->ct.expect_vmalloc, 0);
+ net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
if (net->ct.expect_hash == NULL)
goto err1;
@@ -653,8 +661,7 @@ err3:
if (net_eq(net, &init_net))
kmem_cache_destroy(nf_ct_expect_cachep);
err2:
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
return err;
}
@@ -666,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
}
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index bd82450c193f..80a23ed62bb0 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -140,15 +140,16 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
/* This assumes that extended areas in conntrack for the types
whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
for (i = min; i <= max; i++) {
- t1 = nf_ct_ext_types[i];
+ t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (!t1)
continue;
- t1->alloc_size = sizeof(struct nf_ct_ext)
- + ALIGN(sizeof(struct nf_ct_ext), t1->align)
- + t1->len;
+ t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+ t1->len;
for (j = 0; j < NF_CT_EXT_NUM; j++) {
- t2 = nf_ct_ext_types[j];
+ t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (t2 == NULL || t2 == t1 ||
(t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
continue;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 59e1a4cd4e8b..1bdfea357955 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
static struct hlist_head *nf_ct_helper_hash __read_mostly;
static unsigned int nf_ct_helper_hsize __read_mostly;
static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
/* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
struct nf_conn_help *help = nfct_help(ct);
- if (help && help->helper == me) {
+ if (help && rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
rcu_assign_pointer(help->helper, NULL);
}
@@ -210,7 +212,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
hlist_for_each_entry_safe(exp, n, next,
&net->ct.expect_hash[i], hnode) {
struct nf_conn_help *help = nfct_help(exp->master);
- if ((help->helper == me || exp->helper == me) &&
+ if ((rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me || exp->helper == me) &&
del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@ int nf_conntrack_helper_init(void)
int err;
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
- nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
- &nf_ct_helper_vmalloc, 0);
+ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
if (!nf_ct_helper_hash)
return -ENOMEM;
@@ -273,14 +277,12 @@ int nf_conntrack_helper_init(void)
return 0;
err1:
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
return err;
}
void nf_conntrack_helper_fini(void)
{
nf_ct_extend_unregister(&helper_extend);
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
}
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index aadde018a072..4c8f30a3d6d2 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -18,14 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
MODULE_ALIAS_NFCT_HELPER("netbios_ns");
static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
-static int help(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
- struct nf_conntrack_expect *exp;
- struct iphdr *iph = ip_hdr(skb);
- struct rtable *rt = skb_rtable(skb);
- struct in_device *in_dev;
- __be32 mask = 0;
-
- /* we're only interested in locally generated packets */
- if (skb->sk == NULL)
- goto out;
- if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
- goto out;
- if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
- goto out;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(rt->dst.dev);
- if (in_dev != NULL) {
- for_primary_ifa(in_dev) {
- if (ifa->ifa_broadcast == iph->daddr) {
- mask = ifa->ifa_mask;
- break;
- }
- } endfor_ifa(in_dev);
- }
- rcu_read_unlock();
-
- if (mask == 0)
- goto out;
-
- exp = nf_ct_expect_alloc(ct);
- if (exp == NULL)
- goto out;
-
- exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
- exp->mask.src.u3.ip = mask;
- exp->mask.src.u.udp.port = htons(0xFFFF);
-
- exp->expectfn = NULL;
- exp->flags = NF_CT_EXPECT_PERMANENT;
- exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
- exp->helper = NULL;
-
- nf_ct_expect_related(exp);
- nf_ct_expect_put(exp);
-
- nf_ct_refresh(ct, skb, timeout * HZ);
-out:
- return NF_ACCEPT;
-}
-
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
static struct nf_conntrack_helper helper __read_mostly = {
.name = "netbios-ns",
- .tuple.src.l3num = AF_INET,
+ .tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
- .help = help,
+ .help = netbios_ns_help,
.expect_policy = &exp_policy,
};
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2b7eef37875c..b4df3eff4240 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -42,6 +42,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@ nla_put_failure:
return -1;
}
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+ struct nlattr *nest_count;
+ const struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (!tstamp)
+ return 0;
+
+ nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+ if (!nest_count)
+ goto nla_put_failure;
+
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+ if (tstamp->stop != 0) {
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+ cpu_to_be64(tstamp->stop));
+ }
+ nla_nest_end(skb, nest_count);
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -471,6 +500,18 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
}
static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+ return 0;
+ return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+ return 0;
+#endif
+}
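
ctnetlink_timestamp_size() reserves room for one nested attribute header plus two 64-bit timestamps. Netlink attribute sizing is just "4-byte header plus payload, rounded up to 4 bytes", so the nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t)) expression above comes to 28 bytes (4 + 2 * 12). The sketch below re-derives that number in userspace; the macros are local re-definitions written for the illustration and only mirror the uapi ones.

#include <stdint.h>
#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* sizeof(struct nlattr) is 4 bytes */

/* Space one attribute of the given payload size occupies in a message. */
static size_t nla_total(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* One empty nest header plus two 64-bit timestamps, as reserved above. */
	size_t sz = nla_total(0) + 2 * nla_total(sizeof(uint64_t));

	printf("timestamp nest needs %zu bytes\n", sz);		/* 28 */
	return 0;
}
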
+
+static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -481,6 +522,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ ctnetlink_counters_size(ct)
+ + ctnetlink_timestamp_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
@@ -571,7 +613,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (events & (1 << IPCT_DESTROY)) {
if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+ ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0)
goto nla_put_failure;
} else {
if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -760,7 +803,7 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
- enum ctattr_tuple type, u_int8_t l3num)
+ enum ctattr_type type, u_int8_t l3num)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@@ -924,7 +967,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
+ if (nlh->nlmsg_flags & NLM_F_DUMP)
return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
ctnetlink_done);
@@ -1357,6 +1400,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
@@ -1375,6 +1419,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
#endif
+ memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
@@ -1787,7 +1832,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
return netlink_dump_start(ctnl, skb, nlh,
ctnetlink_exp_dump_table,
ctnetlink_exp_done);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index dc7bb74110df..5701c8dd783c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -166,6 +166,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto
int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
+ struct nf_conntrack_l3proto *old;
if (proto->l3proto >= AF_MAX)
return -EBUSY;
@@ -174,7 +175,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
- if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+ old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex));
+ if (old != &nf_conntrack_l3proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -201,7 +204,9 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+ BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
mutex_lock(&nf_ct_proto_mutex);
if (!nf_ct_protos[l4proto->l3proto]) {
/* l3proto may be loaded later. */
- struct nf_conntrack_l4proto **proto_array;
+ struct nf_conntrack_l4proto __rcu **proto_array;
int i;
proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
}
for (i = 0; i < MAX_NF_CT_PROTO; i++)
- proto_array[i] = &nf_conntrack_l4proto_generic;
+ RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
/* Before making proto_array visible to lockless readers,
* we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
smp_wmb();
nf_ct_protos[l4proto->l3proto] = proto_array;
- } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
- &nf_conntrack_l4proto_generic) {
+ } else if (rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != &nf_conntrack_l4proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -331,7 +338,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+ BUG_ON(rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
nf_ct_l4proto_unregister_sysctl(l4proto);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 5292560d6d4a..9ae57c57c50e 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -452,6 +452,9 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
+ ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+ ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+ ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c6049c2d5ea8..6f4ee70f460b 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -413,6 +413,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
test_bit(SCTP_CID_COOKIE_ACK, map))
return false;
+ memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
new_state = SCTP_CONNTRACK_MAX;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Don't need lock here: this conntrack not in circulation yet */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3fb2b73b24dc..6f38d0e2ea4a 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1066,9 +1066,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
BUG_ON(th == NULL);
/* Don't need lock here: this conntrack not in circulation yet */
- new_state
- = tcp_conntracks[0][get_conntrack_index(th)]
- [TCP_CONNTRACK_NONE];
+ new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
/* Invalid: delete conntrack */
if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/* SYN packet */
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- ct->proto.tcp.seen[1].flags = 0;
} else if (nf_ct_tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/*
* We are in the middle of a connection,
* its history is lost for us.
@@ -1107,7 +1106,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
- ct->proto.tcp.seen[0].td_scale = 0;
/* We assume SACK and liberal window checking to handle
* window scaling */
@@ -1116,13 +1114,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
IP_CT_TCP_FLAG_BE_LIBERAL;
}
- ct->proto.tcp.seen[1].td_end = 0;
- ct->proto.tcp.seen[1].td_maxend = 0;
- ct->proto.tcp.seen[1].td_maxwin = 0;
- ct->proto.tcp.seen[1].td_scale = 0;
-
/* tcp_packet will set them */
- ct->proto.tcp.state = TCP_CONNTRACK_NONE;
ct->proto.tcp.last_index = TCP_NONE_SET;
pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644
index 000000000000..6e545e26289e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -0,0 +1,77 @@
+/*
+ * SNMP service broadcast connection tracking helper
+ *
+ * (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT 161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+ nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+ nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+ if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+ return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+ return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+ .max_expected = 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+ .name = "snmp",
+ .tuple.src.l3num = NFPROTO_IPV4,
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
+ .tuple.dst.protonum = IPPROTO_UDP,
+ .me = THIS_MODULE,
+ .help = snmp_conntrack_help,
+ .expect_policy = &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+ exp_policy.timeout = timeout;
+ return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+ nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b4d7f0f24b27..0ae142825881 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -29,6 +29,8 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
MODULE_LICENSE("GPL");
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(print_tuple);
struct ct_iter_state {
struct seq_net_private p;
unsigned int bucket;
+ u_int64_t time_now;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -69,13 +72,15 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(
+ &net->ct.hash[st->bucket]));
}
return head;
}
@@ -93,6 +98,9 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
+ struct ct_iter_state *st = seq->private;
+
+ st->time_now = ktime_to_ns(ktime_get_real());
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
@@ -132,6 +140,34 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
}
#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ struct ct_iter_state *st = s->private;
+ struct nf_conn_tstamp *tstamp;
+ s64 delta_time;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ delta_time = st->time_now - tstamp->start;
+ if (delta_time > 0)
+ delta_time = div_s64(delta_time, NSEC_PER_SEC);
+ else
+ delta_time = 0;
+
+ return seq_printf(s, "delta-time=%llu ",
+ (unsigned long long)delta_time);
+ }
+ return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ return 0;
+}
+#endif
+
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@@ -200,6 +236,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
+ if (ct_show_delta_time(s, ct))
+ goto release;
+
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
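
The new "delta-time=" field above is derived from two nanosecond timestamps: the flow's start stamp and the time captured in ct_seq_start(). A minimal userspace sketch of the same clamp-then-divide arithmetic (the helper name flow_age_seconds is invented for the example; only the logic mirrors ct_show_delta_time()):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    /* Age of a flow in whole seconds, negative deltas clamped to zero
     * as in ct_show_delta_time(). */
    static int64_t flow_age_seconds(int64_t start_ns, int64_t now_ns)
    {
            int64_t delta = now_ns - start_ns;
            return delta > 0 ? delta / NSEC_PER_SEC : 0;
    }

    int main(void)
    {
            /* example: a flow that started 2.5 seconds before "now" */
            printf("delta-time=%lld\n",
                   (long long)flow_age_seconds(1000000000LL, 3500000000LL));
            return 0;
    }
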
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644
index 000000000000..af7dd31af0a1
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later version, at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+ {
+ .procname = "nf_conntrack_timestamp",
+ .data = &init_net.ct.sysctl_tstamp,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_tstamp),
+ .align = __alignof__(struct nf_conn_tstamp),
+ .id = NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ table[0].data = &net->ct.sysctl_tstamp;
+
+ net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+ nf_net_netfilter_sysctl_path, table);
+ if (!net->ct.tstamp_sysctl_header) {
+ printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+ goto out_register;
+ }
+ return 0;
+
+out_register:
+ kfree(table);
+out:
+ return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+ kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+ int ret;
+
+ net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&tstamp_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+ "extension\n");
+ goto out_extend_register;
+ }
+ }
+
+ ret = nf_conntrack_tstamp_init_sysctl(net);
+ if (ret < 0)
+ goto out_sysctl;
+
+ return 0;
+
+out_sysctl:
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+ return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+ nf_conntrack_tstamp_fini_sysctl(net);
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+}
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..20c775cff2a8 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -161,7 +161,8 @@ static int seq_show(struct seq_file *s, void *v)
struct nf_logger *t;
int ret;
- logger = nf_loggers[*pos];
+ logger = rcu_dereference_protected(nf_loggers[*pos],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -249,7 +250,8 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
- logger = nf_loggers[tindex];
+ logger = rcu_dereference_protected(nf_loggers[tindex],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
table->data = "NONE";
else
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 74aebed5bd28..5ab22e2bbd7d 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -27,14 +27,17 @@ static DEFINE_MUTEX(queue_handler_mutex);
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
int ret;
+ const struct nf_queue_handler *old;
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] == qh)
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old == qh)
ret = -EEXIST;
- else if (queue_handler[pf])
+ else if (old)
ret = -EBUSY;
else {
rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@ EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
+ const struct nf_queue_handler *old;
+
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] && queue_handler[pf] != qh) {
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old && old != qh) {
mutex_unlock(&queue_handler_mutex);
return -EINVAL;
}
@@ -73,7 +80,10 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
mutex_lock(&queue_handler_mutex);
for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
- if (queue_handler[pf] == qh)
+ if (rcu_dereference_protected(
+ queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex)
+ ) == qh)
rcu_assign_pointer(queue_handler[pf], NULL);
}
mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
int (*okfn)(struct sk_buff *),
unsigned int queuenum)
{
- int status;
+ int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
struct net_device *physindev;
@@ -128,16 +138,20 @@ static int __nf_queue(struct sk_buff *skb,
rcu_read_lock();
qh = rcu_dereference(queue_handler[pf]);
- if (!qh)
+ if (!qh) {
+ status = -ESRCH;
goto err_unlock;
+ }
afinfo = nf_get_afinfo(pf);
if (!afinfo)
goto err_unlock;
entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
- if (!entry)
+ if (!entry) {
+ status = -ENOMEM;
goto err_unlock;
+ }
*entry = (struct nf_queue_entry) {
.skb = skb,
@@ -151,11 +165,9 @@ static int __nf_queue(struct sk_buff *skb,
/* If it's going away, ignore hook. */
if (!try_module_get(entry->elem->owner)) {
- rcu_read_unlock();
- kfree(entry);
- return 0;
+ status = -ECANCELED;
+ goto err_unlock;
}
-
/* Bump dev refs so they don't vanish while packet is out */
if (indev)
dev_hold(indev);
@@ -182,14 +194,13 @@ static int __nf_queue(struct sk_buff *skb,
goto err;
}
- return 1;
+ return 0;
err_unlock:
rcu_read_unlock();
err:
- kfree_skb(skb);
kfree(entry);
- return 1;
+ return status;
}
int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
unsigned int queuenum)
{
struct sk_buff *segs;
+ int err;
+ unsigned int queued;
if (!skb_is_gso(skb))
return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@ int nf_queue(struct sk_buff *skb,
}
segs = skb_gso_segment(skb, 0);
- kfree_skb(skb);
+	/* We do not use PTR_ERR() here, so as to limit the number of error
+	 * codes that nf_queue() can return; callers rely on -ECANCELED to
+	 * mean 'ignore this hook', for instance.
+	 */
if (IS_ERR(segs))
- return 1;
+ return -EINVAL;
+ queued = 0;
+ err = 0;
do {
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
- queuenum))
+ if (err == 0)
+ err = __nf_queue(segs, elem, pf, hook, indev,
+ outdev, okfn, queuenum);
+ if (err == 0)
+ queued++;
+ else
kfree_skb(segs);
segs = nskb;
} while (segs);
- return 1;
+
+ /* also free orig skb if only some segments were queued */
+ if (unlikely(err && queued))
+ err = 0;
+ if (err == 0)
+ kfree_skb(skb);
+ return err;
}
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
struct sk_buff *skb = entry->skb;
struct list_head *elem = &entry->elem->list;
const struct nf_afinfo *afinfo;
+ int err;
rcu_read_lock();
@@ -270,10 +299,17 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable();
break;
case NF_QUEUE:
- if (!__nf_queue(skb, elem, entry->pf, entry->hook,
- entry->indev, entry->outdev, entry->okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ err = __nf_queue(skb, elem, entry->pf, entry->hook,
+ entry->indev, entry->outdev, entry->okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+ if (err == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
break;
case NF_STOLEN:
default:
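
After this change __nf_queue() reports failure as a negative errno instead of a bare 1, and the NF_QUEUE branch of nf_reinject() drops the packet unless the failure is -ECANCELED or an -ESRCH with queue bypass requested. A minimal userspace sketch of that decision (the helper name queue_failure_means_drop is invented; the NF_VERDICT_FLAG_QUEUE_BYPASS value is assumed from include/linux/netfilter.h):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000  /* assumed value */

    /* Decide whether a failed __nf_queue() means "drop the skb" or
     * "fall through to the next hook", mirroring nf_reinject() above. */
    static bool queue_failure_means_drop(int err, unsigned int verdict)
    {
            if (err == -ECANCELED)                      /* hook going away */
                    return false;
            if (err == -ESRCH &&                        /* no queue handler */
                (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                    return false;
            return true;                                /* otherwise: free the skb */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   queue_failure_means_drop(-ECANCELED, 0),
                   queue_failure_means_drop(-ESRCH, NF_VERDICT_FLAG_QUEUE_BYPASS),
                   queue_failure_means_drop(-ENOMEM, 0));
            return 0;
    }
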
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 6a1572b0ab41..91592da504b9 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -874,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
if (!hlist_empty(&instance_table[st->bucket]))
- return rcu_dereference_bh(instance_table[st->bucket].first);
+ return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return NULL;
}
static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
- h = rcu_dereference_bh(h->next);
+ h = rcu_dereference_bh(hlist_next_rcu(h));
while (!h) {
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
- h = rcu_dereference_bh(instance_table[st->bucket].first);
+ h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return h;
}
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 68e67d19724d..b83123f12b42 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
struct sk_buff *nskb;
struct nfqnl_instance *queue;
- int err;
+ int err = -ENOBUFS;
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(queuenum);
- if (!queue)
+ if (!queue) {
+ err = -ESRCH;
goto err_out;
+ }
- if (queue->copy_mode == NFQNL_COPY_NONE)
+ if (queue->copy_mode == NFQNL_COPY_NONE) {
+ err = -EINVAL;
goto err_out;
+ }
nskb = nfqnl_build_packet_message(queue, entry);
- if (nskb == NULL)
+ if (nskb == NULL) {
+ err = -ENOMEM;
goto err_out;
-
+ }
spin_lock_bh(&queue->lock);
- if (!queue->peer_pid)
+ if (!queue->peer_pid) {
+ err = -EINVAL;
goto err_out_free_nskb;
-
+ }
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
if (net_ratelimit())
@@ -432,7 +438,7 @@ err_out_free_nskb:
err_out_unlock:
spin_unlock_bh(&queue->lock);
err_out:
- return -1;
+ return err;
}
static int
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c94237631077..0a77d2ff2154 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/audit.h>
#include <net/net_namespace.h>
#include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
- struct compat_delta *next;
- unsigned int offset;
- int delta;
+ unsigned int offset; /* offset in kernel */
+ int delta; /* delta in 32bit user land */
};
struct xt_af {
@@ -49,7 +49,9 @@ struct xt_af {
struct list_head target;
#ifdef CONFIG_COMPAT
struct mutex compat_mutex;
- struct compat_delta *compat_offsets;
+ struct compat_delta *compat_tab;
+ unsigned int number; /* number of slots in compat_tab[] */
+ unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
@@ -414,54 +416,67 @@ int xt_check_match(struct xt_mtchk_param *par,
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
- struct compat_delta *tmp;
+ struct xt_af *xp = &xt[af];
- tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
+ if (!xp->compat_tab) {
+ if (!xp->number)
+ return -EINVAL;
+ xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+ if (!xp->compat_tab)
+ return -ENOMEM;
+ xp->cur = 0;
+ }
- tmp->offset = offset;
- tmp->delta = delta;
+ if (xp->cur >= xp->number)
+ return -EINVAL;
- if (xt[af].compat_offsets) {
- tmp->next = xt[af].compat_offsets->next;
- xt[af].compat_offsets->next = tmp;
- } else {
- xt[af].compat_offsets = tmp;
- tmp->next = NULL;
- }
+ if (xp->cur)
+ delta += xp->compat_tab[xp->cur - 1].delta;
+ xp->compat_tab[xp->cur].offset = offset;
+ xp->compat_tab[xp->cur].delta = delta;
+ xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
- struct compat_delta *tmp, *next;
-
- if (xt[af].compat_offsets) {
- for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
- next = tmp->next;
- kfree(tmp);
- }
- xt[af].compat_offsets = NULL;
+ if (xt[af].compat_tab) {
+ vfree(xt[af].compat_tab);
+ xt[af].compat_tab = NULL;
+ xt[af].number = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
- struct compat_delta *tmp;
- int delta;
-
- for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
- if (tmp->offset < offset)
- delta += tmp->delta;
- return delta;
+ struct compat_delta *tmp = xt[af].compat_tab;
+ int mid, left = 0, right = xt[af].cur - 1;
+
+ while (left <= right) {
+ mid = (left + right) >> 1;
+ if (offset > tmp[mid].offset)
+ left = mid + 1;
+ else if (offset < tmp[mid].offset)
+ right = mid - 1;
+ else
+ return mid ? tmp[mid - 1].delta : 0;
+ }
+ WARN_ON_ONCE(1);
+ return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+ xt[af].number = number;
+ xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +835,21 @@ xt_replace_table(struct xt_table *table,
*/
local_bh_enable();
+#ifdef CONFIG_AUDIT
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_NETFILTER_CFG);
+ if (ab) {
+ audit_log_format(ab, "table=%s family=%u entries=%u",
+ table->name, table->af,
+ private->number);
+ audit_log_end(ab);
+ }
+ }
+#endif
+
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1368,7 @@ static int __init xt_init(void)
mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
mutex_init(&xt[i].compat_mutex);
- xt[i].compat_offsets = NULL;
+ xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
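
The linked compat_offsets list is replaced here by a sorted array in which each entry stores the cumulative delta, so a lookup is a binary search that returns the delta of the preceding entry. A minimal userspace sketch of the same bookkeeping (the names add_offset and calc_jump are invented for the example):

    #include <stdio.h>

    struct compat_delta { unsigned int offset; int delta; };

    /* Offsets must be added in increasing order; deltas are stored
     * cumulatively, as in xt_compat_add_offset() above. */
    static void add_offset(struct compat_delta *tab, unsigned int *cur,
                           unsigned int offset, int delta)
    {
            if (*cur)
                    delta += tab[*cur - 1].delta;
            tab[*cur].offset = offset;
            tab[*cur].delta = delta;
            (*cur)++;
    }

    /* The jump for a known offset is the cumulative delta of all entries
     * before it, mirroring xt_compat_calc_jump(). */
    static int calc_jump(const struct compat_delta *tab, unsigned int cur,
                         unsigned int offset)
    {
            int left = 0, right = (int)cur - 1;

            while (left <= right) {
                    int mid = (left + right) >> 1;
                    if (offset > tab[mid].offset)
                            left = mid + 1;
                    else if (offset < tab[mid].offset)
                            right = mid - 1;
                    else
                            return mid ? tab[mid - 1].delta : 0;
            }
            return 0;   /* unknown offset; the kernel warns here */
    }

    int main(void)
    {
            struct compat_delta tab[3];
            unsigned int cur = 0;

            add_offset(tab, &cur, 100, 8);
            add_offset(tab, &cur, 200, 4);
            add_offset(tab, &cur, 300, 8);
            printf("%d %d %d\n",
                   calc_jump(tab, cur, 100),    /* 0  */
                   calc_jump(tab, cur, 200),    /* 8  */
                   calc_jump(tab, cur, 300));   /* 12 */
            return 0;
    }
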
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644
index 000000000000..81802d27346e
--- /dev/null
+++ b/net/netfilter/xt_AUDIT.c
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+ unsigned int proto, unsigned int offset)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE: {
+ const __be16 *pptr;
+ __be16 _ports[2];
+
+ pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+ if (pptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " sport=%hu dport=%hu",
+ ntohs(pptr[0]), ntohs(pptr[1]));
+ }
+ break;
+
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6: {
+ const u8 *iptr;
+ u8 _ih[2];
+
+ iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+ if (iptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+ iptr[0], iptr[1]);
+
+ }
+ break;
+ }
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct iphdr _iph;
+ const struct iphdr *ih;
+
+ ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+ &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+ if (ntohs(ih->frag_off) & IP_OFFSET) {
+ audit_log_format(ab, " frag=1");
+ return;
+ }
+
+ audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct ipv6hdr _ip6h;
+ const struct ipv6hdr *ih;
+ u8 nexthdr;
+ int offset;
+
+ ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ nexthdr = ih->nexthdr;
+ offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+ &nexthdr);
+
+ audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+ &ih->saddr, &ih->daddr, nexthdr);
+
+ if (offset)
+ audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+ if (ab == NULL)
+ goto errout;
+
+ audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+ info->type, par->hooknum, skb->len,
+ par->in ? par->in->name : "?",
+ par->out ? par->out->name : "?");
+
+ if (skb->mark)
+ audit_log_format(ab, " mark=%#x", skb->mark);
+
+ if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+ audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+
+ if (par->family == NFPROTO_BRIDGE) {
+ switch (eth_hdr(skb)->h_proto) {
+ case __constant_htons(ETH_P_IP):
+ audit_ip4(ab, skb);
+ break;
+
+ case __constant_htons(ETH_P_IPV6):
+ audit_ip6(ab, skb);
+ break;
+ }
+ }
+ }
+
+ switch (par->family) {
+ case NFPROTO_IPV4:
+ audit_ip4(ab, skb);
+ break;
+
+ case NFPROTO_IPV6:
+ audit_ip6(ab, skb);
+ break;
+ }
+
+ audit_log_end(ab);
+
+errout:
+ return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+
+ if (info->type > XT_AUDIT_TYPE_MAX) {
+ pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+ XT_AUDIT_TYPE_MAX);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+ .name = "AUDIT",
+ .family = NFPROTO_UNSPEC,
+ .target = audit_tg,
+ .targetsize = sizeof(struct xt_audit_info),
+ .checkentry = audit_tg_check,
+ .me = THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+ return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+ xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index c2c0e4abeb99..af9c4dadf816 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -19,12 +19,14 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Qdisc classification");
MODULE_ALIAS("ipt_CLASSIFY");
MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
static unsigned int
classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static struct xt_target classify_tg_reg __read_mostly = {
- .name = "CLASSIFY",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .table = "mangle",
- .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
- (1 << NF_INET_POST_ROUTING),
- .target = classify_tg,
- .targetsize = sizeof(struct xt_classify_target_info),
- .me = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_POST_ROUTING),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_ARP,
+ .hooks = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
};
static int __init classify_tg_init(void)
{
- return xt_register_target(&classify_tg_reg);
+ return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
static void __exit classify_tg_exit(void)
{
- xt_unregister_target(&classify_tg_reg);
+ xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
module_init(classify_tg_init);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index be1f22e13545..3bdd443aaf15 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -313,3 +313,5 @@ MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index a4140509eea1..993de2ba89d3 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -31,6 +31,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
static LIST_HEAD(xt_led_triggers);
static DEFINE_MUTEX(xt_led_mutex);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 039cce1bde3d..d4f4b5d66b20 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -72,18 +72,31 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
if (info->queues_total > 1) {
if (par->family == NFPROTO_IPV4)
- queue = hash_v4(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v4(skb) * info->queues_total) >>
+ 32) + queue;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
else if (par->family == NFPROTO_IPV6)
- queue = hash_v6(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v6(skb) * info->queues_total) >>
+ 32) + queue;
#endif
}
return NF_QUEUE_NR(queue);
}
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
+ unsigned int ret = nfqueue_tg_v1(skb, par);
+
+ if (info->bypass)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+ return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
u32 maxid;
if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@ static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
info->queues_total, maxid);
return -ERANGE;
}
+ if (par->target->revision == 2 && info->bypass > 1)
+ return -EINVAL;
return 0;
}
@@ -115,11 +130,20 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
.name = "NFQUEUE",
.revision = 1,
.family = NFPROTO_UNSPEC,
- .checkentry = nfqueue_tg_v1_check,
+ .checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
+ {
+ .name = "NFQUEUE",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfqueue_tg_check,
+ .target = nfqueue_tg_v2,
+ .targetsize = sizeof(struct xt_NFQ_info_v2),
+ .me = THIS_MODULE,
+ },
};
static int __init nfqueue_tg_init(void)
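
The balancing change above replaces "hash % queues_total" with a multiply-shift, which maps a 32-bit hash onto [0, queues_total) without a division. A minimal userspace sketch of that mapping (pick_queue is an invented name for the example):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a 32-bit hash onto [0, n_queues), as nfqueue_tg_v1() now does. */
    static uint32_t pick_queue(uint32_t hash, uint32_t n_queues)
    {
            return (uint32_t)(((uint64_t)hash * n_queues) >> 32);
    }

    int main(void)
    {
            printf("%u %u %u\n",
                   pick_queue(0x00000000u, 4),   /* 0 */
                   pick_queue(0x80000000u, 4),   /* 2 */
                   pick_queue(0xffffffffu, 4));  /* 3 */
            return 0;
    }
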
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 5c5b6b921b84..e029c4807404 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -185,18 +185,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
int connections;
ct = nf_ct_get(skb, &ctinfo);
- if (ct != NULL)
- tuple_ptr = &ct->tuplehash[0].tuple;
- else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- par->family, &tuple))
+ if (ct != NULL) {
+ if (info->flags & XT_CONNLIMIT_DADDR)
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ else
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ par->family, &tuple)) {
goto hotdrop;
+ }
if (par->family == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
- memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+ memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+ &iph->daddr : &iph->saddr, sizeof(addr.ip6));
} else {
const struct iphdr *iph = ip_hdr(skb);
- addr.ip = iph->saddr;
+ addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+ iph->daddr : iph->saddr;
}
spin_lock_bh(&info->data->lock);
@@ -204,13 +210,12 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
&info->mask, par->family);
spin_unlock_bh(&info->data->lock);
- if (connections < 0) {
+ if (connections < 0)
/* kmalloc failed, drop it entirely */
- par->hotdrop = true;
- return false;
- }
+ goto hotdrop;
- return (connections > info->limit) ^ info->inverse;
+ return (connections > info->limit) ^
+ !!(info->flags & XT_CONNLIMIT_INVERT);
hotdrop:
par->hotdrop = true;
@@ -268,25 +273,38 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
kfree(info->data);
}
-static struct xt_match connlimit_mt_reg __read_mostly = {
- .name = "connlimit",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+ {
+ .name = "connlimit",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init connlimit_mt_init(void)
{
- return xt_register_match(&connlimit_mt_reg);
+ return xt_register_matches(connlimit_mt_reg,
+ ARRAY_SIZE(connlimit_mt_reg));
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_match(&connlimit_mt_reg);
+ xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
}
module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index e536710ad916..4ef1b63ad73f 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -112,6 +112,54 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
return true;
}
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+ return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+ const struct nf_conn *ct)
+{
+ const struct nf_conntrack_tuple *tuple;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+ (nf_ct_protonum(ct) == info->l4proto) ^
+ !(info->invert_flags & XT_CONNTRACK_PROTO))
+ return false;
+
+ /* Shortcut to match all recognized protocols by using ->src.all. */
+ if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+ !port_match(info->origsrc_port, info->origsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+ !port_match(info->origdst_port, info->origdst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+ return false;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+ !port_match(info->replsrc_port, info->replsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+ !port_match(info->repldst_port, info->repldst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+ return false;
+
+ return true;
+}
+
static bool
conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
!(info->invert_flags & XT_CONNTRACK_REPLDST))
return false;
- if (!ct_proto_port_check(info, ct))
- return false;
+ if (par->match->revision != 3) {
+ if (!ct_proto_port_check(info, ct))
+ return false;
+ } else {
+ if (!ct_proto_port_check_v3(par->matchinfo, ct))
+ return false;
+ }
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
(!!(status_mask & ct->status) ^
@@ -207,6 +260,14 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+ return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
@@ -244,6 +305,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
+ {
+ .name = "conntrack",
+ .revision = 3,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo3),
+ .match = conntrack_mt_v3,
+ .checkentry = conntrack_mt_check,
+ .destroy = conntrack_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init conntrack_mt_init(void)
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
index b39db8a5cbae..c7a2e5466bc4 100644
--- a/net/netfilter/xt_cpu.c
+++ b/net/netfilter/xt_cpu.c
@@ -22,6 +22,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
static int cpu_mt_check(const struct xt_mtchk_param *par)
{
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644
index 000000000000..d9202cdd25c9
--- /dev/null
+++ b/net/netfilter/xt_devgroup.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+ return false;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+ return false;
+
+ return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+ XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD)))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ par->hook_mask & ~((1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+ .name = "devgroup",
+ .match = devgroup_mt,
+ .checkentry = devgroup_mt_checkentry,
+ .matchsize = sizeof(struct xt_devgroup_info),
+ .family = NFPROTO_UNSPEC,
+ .me = THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+ return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+ xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 88f7c3511c72..d3eb5ed1892f 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
&iph->saddr,
(info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
- &info->src_max.ip,
+ &info->src_min.ip,
&info->src_max.ip);
return false;
}
@@ -78,15 +78,27 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
m = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
m ^= !!(info->flags & IPRANGE_SRC_INV);
- if (m)
+ if (m) {
+ pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->saddr,
+ (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+ &info->src_min.in6,
+ &info->src_max.in6);
return false;
+ }
}
if (info->flags & IPRANGE_DST) {
m = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
m ^= !!(info->flags & IPRANGE_DST_INV);
- if (m)
+ if (m) {
+ pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->daddr,
+ (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+ &info->dst_min.in6,
+ &info->dst_max.in6);
return false;
+ }
}
return true;
}
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 9127a3d8aa35..bb10b0717f1b 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+ cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
if (unlikely(cp == NULL)) {
match = false;
goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644
index 000000000000..061d48cec137
--- /dev/null
+++ b/net/netfilter/xt_set.c
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+ u8 pf, u8 dim, u8 flags, int inv)
+{
+ if (ip_set_test(index, skb, pf, dim, flags))
+ inv = !inv;
+ return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.u.compat.dim,
+ info->match_set.u.compat.flags,
+ info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+ u_int8_t i;
+
+ /* Fill out compatibility data according to enum ip_set_kopt */
+ info->u.compat.dim = IPSET_DIM_ZERO;
+ if (info->u.flags[0] & IPSET_MATCH_INV)
+ info->u.compat.flags |= IPSET_INV_MATCH;
+ for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+ info->u.compat.dim++;
+ if (info->u.flags[i] & IPSET_SRC)
+ info->u.compat.flags |= (1<<info->u.compat.dim);
+ }
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->match_set);
+
+ return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index, skb, par->family,
+ info->add_set.u.compat.dim,
+ info->add_set.u.compat.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index, skb, par->family,
+ info->del_set.u.compat.dim,
+ info->del_set.u.compat.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+ struct xt_set_info_target_v0 *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+ info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->add_set);
+ compat_flags(&info->del_set);
+
+ return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.dim,
+ info->match_set.flags,
+ info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index,
+ skb, par->family,
+ info->add_set.dim,
+ info->add_set.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index,
+ skb, par->family,
+			   info->del_set.dim,
+ info->del_set.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.dim > IPSET_DIM_MAX ||
+	    info->del_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 0,
+ .match = set_match_v0,
+ .matchsize = sizeof(struct xt_set_info_match_v0),
+ .checkentry = set_match_v0_checkentry,
+ .destroy = set_match_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+ {
+ .name = "SET",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = set_target_v0,
+ .targetsize = sizeof(struct xt_set_info_target_v0),
+ .checkentry = set_target_v0_checkentry,
+ .destroy = set_target_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static int __init xt_set_init(void)
+{
+ int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+ if (!ret) {
+ ret = xt_register_targets(set_targets,
+ ARRAY_SIZE(set_targets));
+ if (ret)
+ xt_unregister_matches(set_matches,
+ ARRAY_SIZE(set_matches));
+ }
+ return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+ xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+ xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f83cb370292b..1781d99145e2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
security_netlink_recv(skb, CAP_NET_ADMIN))
return -EPERM;
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
if (ops->dumpit == NULL)
return -EOPNOTSUPP;
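
The NLM_F_DUMP tests relaxed here (and in ctnetlink above) matter because NLM_F_DUMP is a composite of NLM_F_ROOT|NLM_F_MATCH: a request carrying only one of the two bits fails the old equality test but passes the new mask test. A minimal sketch of the difference (flag values assumed from include/linux/netlink.h):

    #include <stdio.h>

    #define NLM_F_ROOT   0x100   /* assumed values for the example */
    #define NLM_F_MATCH  0x200
    #define NLM_F_DUMP   (NLM_F_ROOT | NLM_F_MATCH)

    int main(void)
    {
            unsigned int flags = NLM_F_ROOT;   /* only one of the two dump bits */

            printf("strict: %d  loose: %d\n",
                   (flags & NLM_F_DUMP) == NLM_F_DUMP,   /* 0: old test misses it */
                   (flags & NLM_F_DUMP) != 0);           /* 1: new test catches it */
            return 0;
    }
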
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 91cb1d71f018..c60649ec1193 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -164,7 +164,6 @@ struct packet_mreq_max {
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
int closing, int tx_ring);
-#define PGV_FROM_VMALLOC 1
struct pgv {
char *buffer;
};
@@ -523,11 +522,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
{
struct sk_filter *filter;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
res = sk_run_filter(skb, filter->insns);
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return res;
}
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 9542449c0720..da8adac2bf06 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,7 +50,6 @@ rdsdebug(char *fmt, ...)
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
#define RDS_CONG_MAP_BYTES (65536 / 8)
-#define RDS_CONG_MAP_LONGS (RDS_CONG_MAP_BYTES / sizeof(unsigned long))
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf765876458..7fce6dfd2180 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
default y
config RFKILL_INPUT
- bool "RF switch input support" if EMBEDDED
+ bool "RF switch input support" if EXPERT
depends on RFKILL
depends on INPUT = y || RFKILL = INPUT
- default y if !EMBEDDED
+ default y if !EXPERT
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f04d4a484d53..8c19b6e3201e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -205,6 +205,29 @@ config NET_SCH_DRR
If unsure, say N.
+config NET_SCH_MQPRIO
+ tristate "Multi-queue priority scheduler (MQPRIO)"
+ help
+ Say Y here if you want to use the Multi-queue Priority scheduler.
+ This scheduler allows QoS to be offloaded to NICs that support
+ hardware offload of QoS scheduling.
+
+ To compile this code as a module, choose M here: the module will
+ be called sch_mqprio.
+
+ If unsure, say N.
+
+config NET_SCH_CHOKE
+ tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+ help
+ Say Y here if you want to use the CHOKe packet scheduler (CHOose
+ and Keep for responsive flows, CHOose and Kill for unresponsive
+ flows). This is a variation of RED which tries to penalize flows
+ that monopolize the queue.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_choke.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
@@ -243,7 +266,7 @@ config NET_CLS_TCINDEX
config NET_CLS_ROUTE4
tristate "Routing decision (ROUTE)"
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
select NET_CLS
---help---
If you say Y here, you will be able to classify packets
@@ -252,9 +275,6 @@ config NET_CLS_ROUTE4
To compile this code as a module, choose M here: the
module will be called cls_route.
-config NET_CLS_ROUTE
- bool
-
config NET_CLS_FW
tristate "Netfilter mark (FW)"
select NET_CLS
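
The NET_SCH_CHOKE help text above captures the core of the CHOKe idea: when a packet arrives, compare it against a randomly chosen packet already sitting in the queue, and if both hash to the same flow, drop the pair; otherwise fall back to RED-style behaviour. What follows is only a minimal userspace sketch of that drop decision; the flow key, the RED step, and all names here are illustrative assumptions, not the kernel's sch_choke code:

#include <stdbool.h>
#include <stdlib.h>

struct pkt { unsigned int flow_hash; };

struct queue {
	struct pkt *slots;
	unsigned int len;
};

/* Placeholder for the RED-style probabilistic drop; always admits here. */
static bool red_would_drop(const struct queue *q)
{
	(void)q;
	return false;
}

/* CHOKe-style admission test: pick a random queued packet; if it hashes to
 * the same flow as the arrival, drop that packet too and refuse the arrival.
 * Otherwise defer to the RED-like check. */
static bool choke_admit(struct queue *q, const struct pkt *arrival,
			bool *drop_peer, unsigned int *peer_idx)
{
	*drop_peer = false;
	if (q->len) {
		unsigned int idx = rand() % q->len;

		if (q->slots[idx].flow_hash == arrival->flow_hash) {
			*drop_peer = true;	/* drop the matched packet too */
			*peer_idx = idx;
			return false;		/* and refuse the arrival */
		}
	}
	return !red_would_drop(q);
}

The point of the random peer comparison is that a flow occupying many queue slots is proportionally more likely to be hit, so heavy flows are penalized without keeping per-flow state.
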
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 960f5dba6304..06c6cdfd1948 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -32,6 +32,9 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
+
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 23b25f89e7e0..15873e14cb54 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo)
{
struct tcf_common *p;
- int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+ int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_common *p, *s_p;
struct nlattr *nest;
- int i= 0, n_i = 0;
+ int i = 0, n_i = 0;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
while (p != NULL) {
s_p = p->tcfc_next;
if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
- module_put(a->ops->owner);
+ module_put(a->ops->owner);
n_i++;
p = s_p;
}
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
nla_nest_end(skb, nest);
return err;
}
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *a;
struct tc_action_ops *a_o;
char act_name[IFNAMSIZ];
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
int err;
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free;
/* module count goes up only when brand new policy is created
- if it exists and is only bound to in a_o->init() then
- ACT_P_CREATED is not returned (a zero is).
- */
+ * if it exists and is only bound to in a_o->init() then
+ * ACT_P_CREATED is not returned (a zero is).
+ */
if (err != ACT_P_CREATED)
module_put(a_o->owner);
a->ops = a_o;
@@ -569,7 +570,7 @@ err_out:
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind)
{
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
int err;
int i;
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct tc_action *a;
int index;
int err;
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct tcamsg *t;
struct netlink_callback dcb;
struct nlattr *nest;
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
int err = -ENOMEM;
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
return 0;
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 pid, int event)
{
int i, ret;
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
return ret;
- if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+ if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
if (tb[1] != NULL)
return tca_action_flush(net, tb[1], n, pid);
else
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* now do the delete */
tcf_action_destroy(head, 0);
ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
- n->nlmsg_flags&NLM_F_ECHO);
+ n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0)
return 0;
return ret;
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy
* stays intact
- * */
+ */
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
}
- /* n->nlmsg_flags&NLM_F_CREATE
- * */
+ /* n->nlmsg_flags & NLM_F_CREATE */
switch (n->nlmsg_type) {
case RTM_NEWACTION:
/* we are going to assume all other flags
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
* but since we want avoid ambiguity (eg when flags
* is zero) then just set this
*/
- if (n->nlmsg_flags&NLM_F_REPLACE)
+ if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@ replay:
static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
- struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+ struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind;
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
}
a_o = tc_lookup_action(kind);
- if (a_o == NULL) {
+ if (a_o == NULL)
return 0;
- }
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 83ddfc07e45d..6cdf9abe475f 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
if (nla == NULL)
return -EINVAL;
- err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+ err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
if (err < 0)
return err;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index c2ed90a4c0b4..2b4ab4b05ce8 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
}
typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */
static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
bind, &gact_idx_gen, &gact_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
- printk(KERN_INFO "GACT probability on\n");
+ pr_info("GACT probability on\n");
#else
- printk(KERN_INFO "GACT probability NOT on\n");
+ pr_info("GACT probability NOT on\n");
#endif
return tcf_register_action(&act_gact_ops);
}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c2a7c20e81c1..9fc211a1b20e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
&ipt_idx_gen, &ipt_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t))
goto err2;
- if ((err = ipt_init_target(t, tname, hook)) < 0)
+ err = ipt_init_target(t, tname, hook);
+ if (err < 0)
goto err3;
spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
- worry later - danger - this API seems to have changed
- from earlier kernels */
+ * worry later - danger - this API seems to have changed
+ * from earlier kernels
+ */
par.in = skb->dev;
par.out = NULL;
par.hooknum = ipt->tcfi_hook;
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c;
/* for simple targets kernel size == user size
- ** user name = target name
- ** for foolproof you need to not assume this
- */
+ * user name = target name
+ * for foolproof you need to not assume this
+ */
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t))
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d765067e99db..961386e2f2c0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock,
};
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
if (m) {
if (bind)
m->tcf_bindcnt--;
m->tcf_refcnt--;
- if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+ if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
list_del(&m->tcfm_list);
if (m->tcfm_dev)
dev_put(m->tcfm_dev);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 178a4bd7b7cb..762b027650a9 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 445bef716f77..50c7c06c019d 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&pedit_idx_gen, &pedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0;
unsigned int off;
- if (skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- return p->tcf_action;
- }
- }
+ if (skb_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return p->tcf_action;
off = skb_network_offset(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e2f08b1e2e58..8a1630774fd6 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,8 @@
#include <net/act_api.h>
#include <net/netlink.h>
-#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
#define POL_TAB_MASK 15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
};
/* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
u32 index;
int action;
u32 limit;
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
- unsigned h;
+ unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7287cff7af3e..a34a22de60b3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
- **/
+ */
pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&simp_idx_gen, &simp_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
{
struct tcf_defact *d = a->priv;
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 836f5fee9e58..5f6f0c7c3905 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&skbedit_idx_gen, &skbedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_skbedit(pc);
ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_skbedit *d = a->priv;
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5fd0c28ef79a..bb2c523f8158 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
int rc = -ENOENT;
write_lock(&cls_mod_lock);
- for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+ for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
if (t == ops)
break;
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp)
- first = tp->prio-1;
+ first = tp->prio - 1;
return first;
}
@@ -149,7 +149,8 @@ replay:
if (prio == 0) {
/* If no priority is given, user wants we allocated it. */
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U);
}
@@ -176,7 +177,8 @@ replay:
}
/* Is it classful? */
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
return -EINVAL;
if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@ replay:
goto errout;
/* Check the chain for existence of proto-tcf with this priority */
- for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+ for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (!nprio || (tp->protocol != protocol && protocol))
+ if (!nprio ||
+ (tp->protocol != protocol && protocol))
goto errout;
} else
tp = NULL;
@@ -216,7 +219,8 @@ replay:
goto errout;
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto errout;
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return skb->len;
if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
goto errout;
if (cops->tcf_chain == NULL)
goto errout;
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
- for (tp=*chain, t=0; tp; tp = tp->next, t++) {
- if (t < s_t) continue;
+ for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+ if (t < s_t)
+ continue;
if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue;
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
- arg.w.skip = cb->args[1]-1;
+ arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
tp->ops->walk(tp, &arg.w);
- cb->args[1] = arg.w.count+1;
+ cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
break;
}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f23d9155b1ef..8be8872dd571 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -21,14 +21,12 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct basic_head
-{
+struct basic_head {
u32 hgenerator;
struct list_head flist;
};
-struct basic_filter
-{
+struct basic_filter {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
-static inline void basic_delete_filter(struct tcf_proto *tp,
- struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
};
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
- unsigned long base, struct nlattr **tb,
- struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est)
{
int err = -EINVAL;
struct tcf_exts e;
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) {
- printk(KERN_ERR "Insufficient number of handles\n");
+ pr_err("Insufficient number of handles\n");
goto errout;
}
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d49c40fb7e09..32a335194ca5 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{
struct cgroup_cls_state *cs;
- if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
return ERR_PTR(-ENOMEM);
if (cgrp->parent)
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
- struct nlattr *tb[TCA_CGROUP_MAX+1];
+ struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t;
struct tcf_exts e;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5b271a18bc3a..8ec01391d988 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -276,7 +276,7 @@ fallback:
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (skb_dst(skb))
return skb_dst(skb)->tclassid;
#endif
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 93b0a7b6f9b4..26e7bc4ffb79 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
-struct fw_head
-{
+struct fw_head {
struct fw_filter *ht[HTSIZE];
u32 mask;
};
-struct fw_filter
-{
+struct fw_filter {
struct fw_filter *next;
u32 id;
struct tcf_result res;
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE
};
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
{
if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
int r;
u32 id = skb->mark;
if (head != NULL) {
id &= head->mask;
- for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(id)]; f; f = f->next) {
if (f->id == id) {
*res = f->res;
#ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
}
} else {
/* old method */
- if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+ if (id && (TC_H_MAJ(id) == 0 ||
+ !(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id;
res->class = 0;
return 0;
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
if (head == NULL)
return 0;
- for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
if (f->id == handle)
return (unsigned long)f;
}
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h=0; h<HTSIZE; h++) {
- while ((f=head->ht[h]) != NULL) {
+ for (h = 0; h < HTSIZE; h++) {
+ while ((f = head->ht[h]) != NULL) {
head->ht[h] = f->next;
fw_delete_filter(tp, f);
}
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
- struct fw_filter *f = (struct fw_filter*)arg;
+ struct fw_head *head = (struct fw_head *)tp->root;
+ struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter **fp;
if (head == NULL || f == NULL)
goto out;
- for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+ for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
struct nlattr **tca,
unsigned long *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@ errout:
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
int h;
if (head == NULL)
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct fw_head *head = (struct fw_head *)tp->root;
- struct fw_filter *f = (struct fw_filter*)fh;
+ struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 694dcd85dec8..d580cdfca093 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -23,34 +23,30 @@
#include <net/pkt_cls.h>
/*
- 1. For now we assume that route tags < 256.
- It allows to use direct table lookups, instead of hash tables.
- 2. For now we assume that "from TAG" and "fromdev DEV" statements
- are mutually exclusive.
- 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ * It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ * are mutually exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/
-struct route4_fastmap
-{
+struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
-struct route4_head
-{
+struct route4_head {
struct route4_fastmap fastmap[16];
- struct route4_bucket *table[256+1];
+ struct route4_bucket *table[256 + 1];
};
-struct route4_bucket
-{
+struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
- struct route4_filter *ht[16+16+1];
+ struct route4_filter *ht[16 + 16 + 1];
};
-struct route4_filter
-{
+struct route4_filter {
struct route4_filter *next;
u32 id;
int iif;
@@ -61,20 +57,20 @@ struct route4_filter
struct route4_bucket *bkt;
};
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT
};
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
{
- return id&0xF;
+ return id & 0xF;
}
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock);
}
-static inline void
+static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
+
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
}
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
{
- return id&0xFF;
+ return id & 0xFF;
}
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
{
- return (id>>16)&0xF;
+ return (id >> 16) & 0xF;
}
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
{
- return 16 + ((iif>>16)&0xF);
+ return 16 + ((iif >> 16) & 0xF);
}
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
{
return 32;
}
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
- if ((dst = skb_dst(skb)) == NULL)
+ dst = skb_dst(skb);
+ if (!dst)
goto failure;
id = dst->tclassid;
if (head == NULL)
goto old_method;
- iif = ((struct rtable*)dst)->fl.iif;
+ iif = ((struct rtable *)dst)->fl.iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id);
restart:
- if ((b = head->table[h]) != NULL) {
+ b = head->table[h];
+ if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id)
ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@ old_method:
static inline u32 to_hash(u32 id)
{
- u32 h = id&0xFF;
- if (id&0x8000)
+ u32 h = id & 0xFF;
+
+ if (id & 0x8000)
h += 256;
return h;
}
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
if (!(id & 0x8000)) {
if (id > 255)
return 256;
- return id&0xF;
+ return id & 0xF;
}
- return 16 + (id&0xF);
+ return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b;
struct route4_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
if (!head)
return 0;
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h1 > 256)
return 0;
- h2 = from_hash(handle>>16);
+ h2 = from_hash(handle >> 16);
if (h2 > 32)
return 0;
- if ((b = head->table[h1]) != NULL) {
+ b = head->table[h1];
+ if (b) {
for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle)
return (unsigned long)f;
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static inline void
+static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h1=0; h1<=256; h1++) {
+ for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
- if ((b = head->table[h1]) != NULL) {
- for (h2=0; h2<=32; h2++) {
+ b = head->table[h1];
+ if (b) {
+ for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct route4_head *head = (struct route4_head*)tp->root;
- struct route4_filter **fp, *f = (struct route4_filter*)arg;
- unsigned h = 0;
+ struct route4_head *head = (struct route4_head *)tp->root;
+ struct route4_filter **fp, *f = (struct route4_filter *)arg;
+ unsigned int h = 0;
struct route4_bucket *b;
int i;
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle;
b = f->bkt;
- for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+ for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=32; i++)
+ for (i = 0; i <= 32; i++)
if (b->ht[i])
return 0;
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
}
h1 = to_hash(nhandle);
- if ((b = head->table[h1]) == NULL) {
+ b = head->table[h1];
+ if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
+
err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle)
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct route4_filter*)*arg) != NULL) {
+ f = (struct route4_filter *)*arg;
+ if (f) {
if (f->handle != handle && handle)
return -EINVAL;
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
reinsert:
h = from_hash(f->handle >> 16);
- for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+ for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle)
break;
@@ -492,7 +497,8 @@ reinsert:
if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle);
h = from_hash(old_handle >> 16);
- if ((b = head->table[th]) != NULL) {
+ b = head->table[th];
+ if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
*fp = f->next;
@@ -515,7 +521,7 @@ errout:
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct route4_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (head == NULL)
arg->stop = 1;
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct route4_filter *f = (struct route4_filter*)fh;
+ struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (!(f->handle&0x8000)) {
- id = f->id&0xFF;
+ if (!(f->handle & 0x8000)) {
+ id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
}
- if (f->handle&0x80000000) {
- if ((f->handle>>16) != 0xFFFF)
+ if (f->handle & 0x80000000) {
+ if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else {
- id = f->id>>16;
+ id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
}
if (f->res.classid)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 425a1790b048..402c44b241a3 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -66,28 +66,25 @@
powerful classification engine. */
-struct rsvp_head
-{
+struct rsvp_head {
u32 tmap[256/32];
u32 hgenerator;
u8 tgenerator;
struct rsvp_session *ht[256];
};
-struct rsvp_session
-{
+struct rsvp_session {
struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi;
u8 protocol;
u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter *ht[16+1];
+ struct rsvp_filter *ht[16 + 1];
};
-struct rsvp_filter
-{
+struct rsvp_filter {
struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi;
@@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess;
};
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
- unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
h ^= h>>16;
h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF;
}
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
{
- unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
h ^= h>>16;
h ^= h>>8;
h ^= h>>4;
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst, *src;
u8 protocol;
u8 tunnelid = 0;
@@ -162,13 +161,13 @@ restart:
src = &nhptr->saddr.s6_addr32[0];
dst = &nhptr->daddr.s6_addr32[0];
protocol = nhptr->nexthdr;
- xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+ xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
src = &nhptr->saddr;
dst = &nhptr->daddr;
protocol = nhptr->protocol;
- xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
- if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+ xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+ if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
return -1;
#endif
@@ -176,10 +175,10 @@ restart:
h2 = hash_src(src);
for (s = sht[h1]; s; s = s->next) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+ if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol &&
!(s->dpi.mask &
- (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+ (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
dst[0] == s->dst[0] &&
dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@ restart:
tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) {
- if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
- !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+ if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+ !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
&&
src[0] == f->src[0] &&
@@ -205,7 +204,7 @@ matched:
return 0;
tunnelid = f->res.classid;
- nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+ nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
goto restart;
}
}
@@ -224,11 +223,11 @@ matched:
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1 = handle&0xFF;
- unsigned h2 = (handle>>8)&0xFF;
+ unsigned int h1 = handle & 0xFF;
+ unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16)
return 0;
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static inline void
+static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
sht = data->ht;
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
while ((s = sht[h1]) != NULL) {
sht[h1] = s->next;
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
- unsigned h = f->handle;
+ struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+ unsigned int h = f->handle;
struct rsvp_session **sp;
struct rsvp_session *s = f->sess;
int i;
- for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+ for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=16; i++)
+ for (i = 0; i <= 16; i++)
if (s->ht[i])
return 0;
/* OK, session has no flows */
- for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+ for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
tcf_tree_lock(tp);
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
}
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
struct rsvp_head *data = tp->root;
int i = 0xFFFF;
while (i-- > 0) {
u32 h;
+
if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000;
h = data->hgenerator|salt;
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static int tunnel_bts(struct rsvp_head *data)
{
- int n = data->tgenerator>>5;
- u32 b = 1<<(data->tgenerator&0x1F);
+ int n = data->tgenerator >> 5;
+ u32 b = 1 << (data->tgenerator & 0x1F);
- if (data->tmap[n]&b)
+ if (data->tmap[n] & b)
return 0;
data->tmap[n] |= b;
return 1;
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
memset(tmap, 0, sizeof(tmap));
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
for (s = sht[h1]; s; s = s->next) {
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
{
int i, k;
- for (k=0; k<2; k++) {
- for (i=255; i>0; i--) {
+ for (k = 0; k < 2; k++) {
+ for (i = 255; i > 0; i--) {
if (++data->tgenerator == 0)
data->tgenerator = 1;
if (tunnel_bts(data))
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst;
int err;
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct rsvp_filter*)*arg) != NULL) {
+ f = (struct rsvp_filter *)*arg;
+ if (f) {
/* Node exists: adjust only classid */
if (f->handle != handle && handle)
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout;
}
- for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+ for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@ insert:
tcf_exts_change(tp, &f->exts, &e);
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
- if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+ if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
wmb();
@@ -567,7 +568,7 @@ errout2:
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct rsvp_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (arg->stop)
return;
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct rsvp_filter *f = (struct rsvp_filter*)fh;
+ struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
if (f->res.classid)
NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
- if (((f->handle>>8)&0xFF) != 16)
+ if (((f->handle >> 8) & 0xFF) != 16)
NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 20ef330bb918..36667fa64237 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
* of the hashing index is below the threshold.
*/
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
- cp.hash = (cp.mask >> cp.shift)+1;
+ cp.hash = (cp.mask >> cp.shift) + 1;
else
cp.hash = DEFAULT_HASH_SIZE;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b0c2a82178af..966920c14e7a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -42,8 +42,7 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct tc_u_knode
-{
+struct tc_u_knode {
struct tc_u_knode *next;
u32 handle;
struct tc_u_hnode *ht_up;
@@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel;
};
-struct tc_u_hnode
-{
+struct tc_u_hnode {
struct tc_u_hnode *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
- unsigned divisor;
+ unsigned int divisor;
struct tc_u_knode *ht[1];
};
-struct tc_u_common
-{
+struct tc_u_common {
struct tc_u_hnode *hlist;
struct Qdisc *q;
int refcnt;
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE
};
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+ const struct tc_u32_sel *sel,
+ u8 fshift)
{
- unsigned h = ntohl(key & sel->hmask)>>fshift;
+ unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
unsigned int off;
} stack[TC_U32_MAXDEPTH];
- struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
@@ -120,7 +119,7 @@ next_knode:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt +=1;
+ n->pf->rcnt += 1;
j = 0;
#endif
@@ -133,7 +132,7 @@ next_knode:
}
#endif
- for (i = n->sel.nkeys; i>0; i--, key++) {
+ for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, _data;
@@ -148,13 +147,13 @@ next_knode:
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] +=1;
+ n->pf->kcnts[j] += 1;
j++;
#endif
}
if (n->ht_down == NULL) {
check_terminal:
- if (n->sel.flags&TC_U32_TERMINAL) {
+ if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@ check_terminal:
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit +=1;
+ n->pf->rhit += 1;
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
@@ -197,10 +196,10 @@ check_terminal:
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
- if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+ if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
- if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+ if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, _data;
@@ -215,7 +214,7 @@ check_terminal:
}
off2 &= ~3;
}
- if (n->sel.flags&TC_U32_EAT) {
+ if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
@@ -236,11 +235,11 @@ out:
deadloop:
if (net_ratelimit())
- printk(KERN_WARNING "cls_u32: dead loop\n");
+ pr_warning("cls_u32: dead loop\n");
return -1;
}
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
- unsigned sel;
+ unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
- } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+ } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
- for (h=0; h<=ht->divisor; h++) {
+ for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
if (ht == NULL)
return 0;
if (TC_U32_KEY(ht->handle))
- return u32_delete_key(tp, (struct tc_u_knode*)ht);
+ return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (tp->root == ht)
return -EINVAL;
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
- unsigned i = 0x7FF;
+ unsigned int i = 0x7FF;
- for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+ for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle))
i = TC_U32_NODE(n->handle);
i++;
- return handle|(i>0xFFF ? 0xFFF : i);
+ return handle | (i > 0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0)
return err;
- if ((n = (struct tc_u_knode*)*arg) != NULL) {
+ n = (struct tc_u_knode *)*arg;
+ if (n) {
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
}
if (tb[TCA_U32_DIVISOR]) {
- unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+ unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100)
return -EINVAL;
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0)
return -ENOMEM;
}
- ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+ ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
ht->tp_c = tp_c;
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct tc_u_knode *n = (struct tc_u_knode*)fh;
+ struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct nlattr *nest;
if (n == NULL)
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
- struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
- u32 divisor = ht->divisor+1;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+ u32 divisor = ht->divisor + 1;
+
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else {
NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev))
+ if (strlen(n->indev))
NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
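
Tying the cls_u32 hunks above together: bucket selection is a masked byte-extract, where the selector's hmask picks bits out of the 32-bit word fetched at the hash offset, fshift right-aligns them, and the hash table's divisor masks the result. A small standalone illustration of the arithmetic (the sample values are made up, and this is not the kernel function itself):

#include <stdio.h>
#include <arpa/inet.h>		/* htonl/ntohl */

/* Mirror of the fold in cls_u32.c: mask the on-the-wire word, convert to
 * host order, then shift the selected field down to bit 0. */
static unsigned int hash_fold(unsigned int key_be, unsigned int hmask_be,
			      unsigned char fshift)
{
	return ntohl(key_be & hmask_be) >> fshift;
}

int main(void)
{
	unsigned int word = htonl(0x0a0b0c0d);	/* e.g. a 32-bit header word */
	unsigned int hmask = htonl(0x00ff0000);	/* select the second byte */
	unsigned int divisor = 0x0f;		/* 16-bucket hash table */

	/* fshift = 16 because the lowest set bit of 0x00ff0000 is bit 16 */
	unsigned int h = divisor & hash_fold(word, hmask, 16);

	printf("bucket = %u\n", h);		/* prints 11 (0x0b & 0x0f) */
	return 0;
}
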
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index bc450397487a..1c8360a2752a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
return 0;
switch (cmp->align) {
- case TCF_EM_ALIGN_U8:
- val = *ptr;
- break;
+ case TCF_EM_ALIGN_U8:
+ val = *ptr;
+ break;
- case TCF_EM_ALIGN_U16:
- val = get_unaligned_be16(ptr);
+ case TCF_EM_ALIGN_U16:
+ val = get_unaligned_be16(ptr);
- if (cmp_needs_transformation(cmp))
- val = be16_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be16_to_cpu(val);
+ break;
- case TCF_EM_ALIGN_U32:
- /* Worth checking boundries? The branching seems
- * to get worse. Visit again. */
- val = get_unaligned_be32(ptr);
+ case TCF_EM_ALIGN_U32:
+ /* Worth checking boundaries? The branching seems

+ * to get worse. Visit again.
+ */
+ val = get_unaligned_be32(ptr);
- if (cmp_needs_transformation(cmp))
- val = be32_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be32_to_cpu(val);
+ break;
- default:
- return 0;
+ default:
+ return 0;
}
if (cmp->mask)
val &= cmp->mask;
switch (cmp->opnd) {
- case TCF_EM_OPND_EQ:
- return val == cmp->val;
- case TCF_EM_OPND_LT:
- return val < cmp->val;
- case TCF_EM_OPND_GT:
- return val > cmp->val;
+ case TCF_EM_OPND_EQ:
+ return val == cmp->val;
+ case TCF_EM_OPND_LT:
+ return val < cmp->val;
+ case TCF_EM_OPND_GT:
+ return val > cmp->val;
}
return 0;
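
The reindented em_cmp switch above reduces to a simple recipe: load a u8/u16/u32 at the configured offset, optionally convert it from network byte order, apply an optional mask, then compare against a constant with EQ, LT or GT. Below is a compact userspace sketch of the same evaluation, with simplified types and made-up field names rather than the kernel's tcf_em_cmp layout:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

enum cmp_align { ALIGN_U8 = 1, ALIGN_U16 = 2, ALIGN_U32 = 4 };
enum cmp_opnd  { OPND_EQ, OPND_LT, OPND_GT };

struct cmp_rule {
	unsigned int	off;		/* byte offset into the packet data */
	enum cmp_align	align;		/* width of the loaded value */
	bool		net_order;	/* convert from big endian first */
	uint32_t	mask;		/* 0 means "no mask" */
	uint32_t	val;		/* constant to compare against */
	enum cmp_opnd	opnd;
};

static bool cmp_match(const uint8_t *data, size_t len, const struct cmp_rule *r)
{
	uint32_t v;

	if (r->off + r->align > len)
		return false;		/* value would read out of bounds */

	switch (r->align) {
	case ALIGN_U8:
		v = data[r->off];
		break;
	case ALIGN_U16: {
		uint16_t tmp;
		memcpy(&tmp, data + r->off, sizeof(tmp));
		v = r->net_order ? ntohs(tmp) : tmp;
		break;
	}
	case ALIGN_U32: {
		uint32_t tmp;
		memcpy(&tmp, data + r->off, sizeof(tmp));
		v = r->net_order ? ntohl(tmp) : tmp;
		break;
	}
	default:
		return false;
	}

	if (r->mask)
		v &= r->mask;

	switch (r->opnd) {
	case OPND_EQ: return v == r->val;
	case OPND_LT: return v < r->val;
	case OPND_GT: return v > r->val;
	}
	return false;
}
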
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 34da5e29ea1a..a889d099320f 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -73,21 +73,18 @@
#include <net/pkt_cls.h>
#include <net/sock.h>
-struct meta_obj
-{
+struct meta_obj {
unsigned long value;
unsigned int len;
};
-struct meta_value
-{
+struct meta_value {
struct tcf_meta_val hdr;
unsigned long val;
unsigned int len;
};
-struct meta_match
-{
+struct meta_match {
struct meta_value lvalue;
struct meta_value rvalue;
};
@@ -255,7 +252,7 @@ META_COLLECTOR(int_rtclassid)
if (unlikely(skb_dst(skb) == NULL))
*err = -1;
else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->value = skb_dst(skb)->tclassid;
#else
dst->value = 0;
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table
**************************************************************************/
-struct meta_ops
-{
+struct meta_ops {
void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *);
};
@@ -494,7 +490,7 @@ struct meta_ops
/* Meta value operations table listing all meta value collectors and
* assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
}
};
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
{
return &__meta_ops[meta_type(val)][meta_id(val)];
}
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
- else if (v->len == sizeof(u32)) {
+ else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val);
- }
return 0;
@@ -663,8 +658,7 @@ nla_put_failure:
* Type specific operations table
**************************************************************************/
-struct meta_type_ops
-{
+struct meta_type_ops {
void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@ struct meta_type_ops
int (*dump)(struct sk_buff *, struct meta_value *, int);
};
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy,
.compare = meta_var_compare,
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
}
};
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
return &__meta_type_ops[meta_type(v)];
}
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
return err;
if (meta_type_ops(v)->apply_extras)
- meta_type_ops(v)->apply_extras(v, dst);
+ meta_type_ops(v)->apply_extras(v, dst);
return 0;
}
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
switch (meta->lvalue.hdr.op) {
- case TCF_EM_OPND_EQ:
- return !r;
- case TCF_EM_OPND_LT:
- return r < 0;
- case TCF_EM_OPND_GT:
- return r > 0;
+ case TCF_EM_OPND_EQ:
+ return !r;
+ case TCF_EM_OPND_LT:
+ return r < 0;
+ case TCF_EM_OPND_GT:
+ return r > 0;
}
return 0;
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val)
{
- return (!meta_id(val) || meta_ops(val)->get);
+ return !meta_id(val) || meta_ops(val)->get;
}
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 1a4176aee6e5..a3bed07a008b 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h>
-struct nbyte_data
-{
+struct nbyte_data {
struct tcf_em_nbyte hdr;
char pattern[0];
};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index ea8f566e720c..15d353d2e4be 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>
-struct text_match
-{
+struct text_match {
u16 from_offset;
u16 to_offset;
u8 from_layer;
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 953f1479f7da..797bdb88c010 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
- return !(((*(__be32*) ptr) ^ key->val) & key->mask);
+ return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 5e37da961f80..88d93eb92507 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{
struct tcf_ematch_ops *e = NULL;
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
}
EXPORT_SYMBOL(tcf_em_unregister);
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
- int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+ int index)
{
return &tree->matches[index];
}
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index
- * referencing an external ematch sequence. */
+ * referencing an external ematch sequence.
+ */
u32 ref;
if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
/* We do not allow backward jumps to avoid loops and jumps
- * to our own position are of course illegal. */
+ * to our own position are of course illegal.
+ */
if (ref <= idx)
goto errout;
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the
- * module is held if the ops field is non zero. */
+ * module is held if the ops field is non zero.
+ */
em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) {
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) {
/* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller
- * to replay the request. */
+ * to replay the request.
+ */
module_put(em->ops->owner);
err = -EAGAIN;
}
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
/* ematch module provides expected length of data, so we
- * can do a basic sanity check. */
+ * can do a basic sanity check.
+ */
if (em->ops->datalen && data_len < em->ops->datalen)
goto errout;
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module
* does not expected a memory reference but rather
- * the value carried. */
+ * the value carried.
+ */
if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32))
goto errout;
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking
- * to this policy will result in parsing failure. */
+ * to this policy will result in parsing failure.
+ */
for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL;
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to
- * undefined references during the matching process. */
+ * undefined references during the matching process.
+ */
if (idx != tree_hdr->nmatches) {
err = -EINVAL;
goto errout_abort;
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags
};
- NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+ NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
int r = em->ops->match(skb, em, info);
+
return tcf_em_is_inverted(em) ? !r : r;
}
@@ -527,8 +536,8 @@ pop_stack:
stack_overflow:
if (net_ratelimit())
- printk(KERN_WARNING "tc ematch: local stack overflow,"
- " increase NET_EMATCH_STACK\n");
+ pr_warning("tc ematch: local stack overflow,"
+ " increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
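
Several of the comments reflowed in the ematch.c hunks above describe tcf_em_validate()'s rules; the "no backward jumps" rule for container ematches is simple enough to restate on its own. The following is an illustrative sketch with made-up types, not the kernel implementation.

#include <stdint.h>

struct match_hdr {
	uint16_t kind;		/* CONTAINER or a real ematch kind */
	uint32_t ref;		/* index referenced by a container */
};

#define KIND_CONTAINER 0

/* A container at position idx may only reference a later entry, so the
 * tree evaluation can never loop back on itself. */
static int refs_are_forward(const struct match_hdr *m, int nmatches)
{
	for (int idx = 0; idx < nmatches; idx++)
		if (m[idx].kind == KIND_CONTAINER && m[idx].ref <= (uint32_t)idx)
			return 0;	/* backward or self reference: reject */
	return 1;
}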
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b22ca2d1cebc..150741579408 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
int err = -ENOENT;
write_lock(&qdisc_mod_lock);
- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+ for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (q == qops)
break;
if (q) {
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
if (!tab || --tab->refcnt)
return;
- for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+ for (rtabp = &qdisc_rtab_list;
+ (rtab = *rtabp) != NULL;
+ rtabp = &rtab->next) {
if (rtab == tab) {
*rtabp = rtab->next;
kfree(rtab);
@@ -396,6 +398,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
return stab;
}
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
@@ -405,7 +412,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) {
list_del(&tab->list);
- kfree(tab);
+ call_rcu_bh(&tab->rcu, stab_kfree_rcu);
}
spin_unlock(&qdisc_stab_lock);
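
The new stab_kfree_rcu()/call_rcu_bh() pair above is the usual RCU deferred-free idiom: embed an rcu_head in the object, free it from the callback once readers are done, and publish replacements with rcu_assign_pointer() (as the later qdisc_change() hunk does). A generic sketch of the pattern for reference; it uses plain call_rcu() and a hypothetical struct, not the sch_api.c types.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_table {
	int refcnt;
	struct rcu_head rcu;
	/* ... payload ... */
};

static void my_table_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_table, rcu));
}

static void my_table_put(struct my_table *tab)
{
	if (tab && --tab->refcnt == 0)
		call_rcu(&tab->rcu, my_table_free_rcu);	/* readers may still hold it */
}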
@@ -428,7 +435,7 @@ nla_put_failure:
return -1;
}
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
int pkt_len, slot;
@@ -454,14 +461,13 @@ out:
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
- printk(KERN_WARNING
- "%s: %s qdisc %X: is non-work-conserving?\n",
- txt, qdisc->ops->id, qdisc->handle >> 16);
+ pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+ txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC;
}
}
@@ -472,7 +478,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- wd->qdisc->flags |= TCQ_F_THROTTLED;
+ qdisc_throttled(wd->qdisc);
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
@@ -625,7 +631,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
autohandle = TC_H_MAKE(0x80000000U, 0);
} while (qdisc_lookup(dev, autohandle) && --i > 0);
- return i>0 ? autohandle : 0;
+ return i > 0 ? autohandle : 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
@@ -834,7 +840,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
err = PTR_ERR(stab);
goto err_out4;
}
- sch->stab = stab;
+ rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
spinlock_t *root_lock;
@@ -874,7 +880,7 @@ err_out4:
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
*/
- qdisc_put_stab(sch->stab);
+ qdisc_put_stab(rtnl_dereference(sch->stab));
if (ops->destroy)
ops->destroy(sch);
goto err_out3;
@@ -882,7 +888,7 @@ err_out4:
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
- struct qdisc_size_table *stab = NULL;
+ struct qdisc_size_table *ostab, *stab = NULL;
int err = 0;
if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
return PTR_ERR(stab);
}
- qdisc_put_stab(sch->stab);
- sch->stab = stab;
+ ostab = rtnl_dereference(sch->stab);
+ rcu_assign_pointer(sch->stab, stab);
+ qdisc_put_stab(ostab);
if (tca[TCA_RATE]) {
/* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@ out:
return 0;
}
-struct check_loop_arg
-{
- struct qdisc_walker w;
+struct check_loop_arg {
+ struct qdisc_walker w;
struct Qdisc *p;
int depth;
};
@@ -970,7 +976,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) {
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -996,7 +1003,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
return -EINVAL;
} else {
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
return -ENOENT;
}
@@ -1008,7 +1016,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
if (q->handle == 0)
return -ENOENT;
- if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+ err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+ if (err != 0)
return err;
} else {
qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/*
- Create/change qdisc.
+ * Create/change qdisc.
*/
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@ replay:
clid = tcm->tcm_parent;
q = p = NULL;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@ replay:
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue_create(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue_create(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1063,13 +1073,14 @@ replay:
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
- if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+ if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle))
return -EINVAL;
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
goto create_n_graft;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1079,7 +1090,7 @@ replay:
atomic_inc(&q->refcnt);
goto graft;
} else {
- if (q == NULL)
+ if (!q)
goto create_n_graft;
/* This magic test requires explanation.
@@ -1101,9 +1112,9 @@ replay:
* For now we select create/graft, if
* user gave KIND, which does not match existing.
*/
- if ((n->nlmsg_flags&NLM_F_CREATE) &&
- (n->nlmsg_flags&NLM_F_REPLACE) &&
- ((n->nlmsg_flags&NLM_F_EXCL) ||
+ if ((n->nlmsg_flags & NLM_F_CREATE) &&
+ (n->nlmsg_flags & NLM_F_REPLACE) &&
+ ((n->nlmsg_flags & NLM_F_EXCL) ||
(tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id))))
goto create_n_graft;
@@ -1118,7 +1129,7 @@ replay:
/* Change qdisc parameters */
if (q == NULL)
return -ENOENT;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1128,7 +1139,7 @@ replay:
return err;
create_n_graft:
- if (!(n->nlmsg_flags&NLM_F_CREATE))
+ if (!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
+ struct qdisc_size_table *stab;
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
q->qstats.qlen = q->q.qlen;
- if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+ stab = rtnl_dereference(q->stab);
+ if (stab && qdisc_dump_stab(skb, stab) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS;
if (old && !tc_qdisc_dump_ignore(old)) {
- if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+ if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+ 0, RTM_DELQDISC) < 0)
goto err_out;
}
if (new && !tc_qdisc_dump_ignore(new)) {
- if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+ if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+ old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
}
if (skb->len)
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
err_out:
kfree_skb(skb);
@@ -1275,7 +1291,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
q_idx++;
continue;
}
- if (!tc_qdisc_dump_ignore(q) &&
+ if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
goto done;
@@ -1356,7 +1372,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
qid = dev->qdisc->handle;
/* Now qid is genuine qdisc handle consistent
- both with parent and child.
-
- TC_H_MAJ(pid) still may be unspecified, complete it now.
+ * both with parent and child.
+ *
+ * TC_H_MAJ(pid) still may be unspecified, complete it now.
*/
if (pid)
pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/* OK. Locate qdisc */
- if ((q = qdisc_lookup(dev, qid)) == NULL)
+ q = qdisc_lookup(dev, qid);
+ if (!q)
return -ENOENT;
/* An check that it supports classes */
@@ -1423,13 +1441,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cl == 0) {
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTCLASS ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto out;
} else {
switch (n->nlmsg_type) {
case RTM_NEWTCLASS:
err = -EEXIST;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
goto out;
break;
case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
return -EINVAL;
}
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
}
-struct qdisc_dump_args
-{
- struct qdisc_walker w;
- struct sk_buff *skb;
- struct netlink_callback *cb;
+struct qdisc_dump_args {
+ struct qdisc_walker w;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
};
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
@@ -1598,7 +1617,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0;
- if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return 0;
s_t = cb->args[0];
@@ -1621,19 +1641,22 @@ done:
}
/* Main classifier routine: scans classifier chain attached
- to this qdisc, (optionally) tests for protocol and asks
- specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
*/
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
__be16 protocol = skb->protocol;
- int err = 0;
+ int err;
for (; tp; tp = tp->next) {
- if ((tp->protocol == protocol ||
- tp->protocol == htons(ETH_P_ALL)) &&
- (err = tp->classify(skb, tp, res)) >= 0) {
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+ err = tp->classify(skb, tp, res);
+
+ if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
@@ -1664,11 +1687,11 @@ reclassify:
if (verd++ >= MAX_REC_LOOP) {
if (net_ratelimit())
- printk(KERN_NOTICE
- "%s: packet reclassify loop"
+ pr_notice("%s: packet reclassify loop"
" rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff, ntohs(tp->protocol));
+ tp->q->ops->id,
+ tp->prio & 0xffff,
+ ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@ static int __init pktsched_init(void)
err = register_pernet_subsys(&psched_net_ops);
if (err) {
- printk(KERN_ERR "pktsched_init: "
+ pr_err("pktsched_init: "
"cannot initialize per netns operations\n");
return err;
}
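
Among the sch_api.c changes above, the tc_classify_compat() rework replaces the nested condition with an early "continue" per filter: skip entries for other protocols, call the classifier, and act on the first non-negative verdict. Restated as a small standalone sketch with simplified stand-in types (not the kernel structures):

struct filter {
	unsigned short protocol;			/* 0 means "any protocol" */
	int (*classify)(const void *pkt, int *classid);
	struct filter *next;
};

static int classify_compat(const void *pkt, unsigned short proto,
			   const struct filter *chain, int *classid)
{
	for (const struct filter *f = chain; f; f = f->next) {
		if (f->protocol && f->protocol != proto)
			continue;			/* wrong protocol, try next */

		int err = f->classify(pkt, classid);
		if (err >= 0)
			return err;			/* first verdict wins */
	}
	return -1;					/* nothing matched */
}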
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 943d733409d0..3f08158b8688 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete.
*/
if (flow->ref < 2) {
- printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+ pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL;
}
if (flow->ref > 2)
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
flow = NULL;
- done:
- ;
+done:
+ ;
}
- if (!flow)
+ if (!flow) {
flow = &p->link;
- else {
+ } else {
if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
- printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
- flow->ref);
+ pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
atm_tc_put(sch, (unsigned long)flow);
}
tasklet_kill(&p->task);
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
}
if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
- else {
+ else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
- }
nla_nest_end(skb, nest);
return skb->len;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c210c5d..24d94c097b35 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
struct cbq_sched_data;
-struct cbq_class
-{
+struct cbq_class {
struct Qdisc_class_common common;
struct cbq_class *next_alive; /* next class with backlog in this priority band */
@@ -139,19 +138,18 @@ struct cbq_class
int refcnt;
int filters;
- struct cbq_class *defaults[TC_PRIO_MAX+1];
+ struct cbq_class *defaults[TC_PRIO_MAX + 1];
};
-struct cbq_sched_data
-{
+struct cbq_sched_data {
struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO+1];
- unsigned quanta[TC_CBQ_MAXPRIO+1];
+ int nclasses[TC_CBQ_MAXPRIO + 1];
+ unsigned int quanta[TC_CBQ_MAXPRIO + 1];
struct cbq_class link;
- unsigned activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
+ unsigned int activemask;
+ struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
with backlog */
#ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
int tx_len;
psched_time_t now; /* Cached timestamp */
psched_time_t now_rt; /* Cached real time */
- unsigned pmask;
+ unsigned int pmask;
struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
};
-#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
- struct cbq_class *cl, *new;
+ struct cbq_class *cl;
- for (cl = this->tparent; cl; cl = cl->tparent)
- if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
- return new;
+ for (cl = this->tparent; cl; cl = cl->tparent) {
+ struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
+ if (new != NULL && new != this)
+ return new;
+ }
return NULL;
}
#endif
/* Classify packet. The procedure is pretty complicated, but
- it allows us to combine link sharing and priority scheduling
- transparently.
-
- Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
- so that it resolves to split nodes. Then packets are classified
- by logical priority, or a more specific classifier may be attached
- to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
*/
static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 1. If skb->priority points to one of our classes, use it.
*/
- if (TC_H_MAJ(prio^sch->handle) == 0 &&
+ if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback;
- if ((cl = (void*)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (TC_H_MAJ(res.classid))
cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+ else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
* Step 4. No success...
*/
if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+ !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
!(cl = head->defaults[TC_PRIO_BESTEFFORT]))
return head;
@@ -290,12 +291,12 @@ fallback:
}
/*
- A packet has just been enqueued on the empty class.
- cbq_activate_class adds it to the tail of active class list
- of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
*/
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
}
/*
- Unlink class from active chain.
- Note that this same procedure is done directly in cbq_dequeue*
- during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
*/
static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
int toplevel = q->toplevel;
- if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+ if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
psched_time_t now;
psched_tdiff_t incr;
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
q->toplevel = cl->level;
return;
}
- } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+ } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
}
}
@@ -390,7 +391,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -418,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
delay += cl->offtime;
/*
- Class goes to sleep, so that it will have no
- chance to work avgidle. Let's forgive it 8)
-
- BTW cbq-2.0 has a crap in this
- place, apparently they forgot to shift it by cl->ewma_log.
+ * Class goes to sleep, so that it will have no
+ * chance to work avgidle. Let's forgive it 8)
+ *
+ * BTW cbq-2.0 has a crap in this
+ * place, apparently they forgot to shift it by cl->ewma_log.
*/
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -439,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
q->wd_expires = delay;
/* Dirty work! We must schedule wakeups based on
- real available rate, rather than leaf rate,
- which may be tiny (even zero).
+ * real available rate, rather than leaf rate,
+ * which may be tiny (even zero).
*/
if (q->toplevel == TC_CBQ_MAXLEVEL) {
struct cbq_class *b;
@@ -460,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
}
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
- they go overlimit
+ * they go overlimit
*/
static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -595,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
struct Qdisc *sch = q->watchdog.qdisc;
psched_time_t now;
psched_tdiff_t delay = 0;
- unsigned pmask;
+ unsigned int pmask;
now = psched_get_time();
@@ -624,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
@@ -649,7 +649,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -665,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
#endif
/*
- It is mission critical procedure.
-
- We "regenerate" toplevel cutoff, if transmitting class
- has backlog and it is not regulated. It is not part of
- original CBQ description, but looks more reasonable.
- Probably, it is wrong. This question needs further investigation.
-*/
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
-static __inline__ void
+static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
struct cbq_class *borrowed)
{
@@ -684,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
q->toplevel = borrowed->level;
return;
}
- } while ((borrowed=borrowed->borrow) != NULL);
+ } while ((borrowed = borrowed->borrow) != NULL);
}
#if 0
/* It is not necessary now. Uncommenting it
@@ -712,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
cl->bstats.bytes += len;
/*
- (now - last) is total time between packet right edges.
- (last_pktlen/rate) is "virtual" busy time, so that
-
- idle = (now - last) - last_pktlen/rate
+ * (now - last) is total time between packet right edges.
+ * (last_pktlen/rate) is "virtual" busy time, so that
+ *
+ * idle = (now - last) - last_pktlen/rate
*/
idle = q->now - cl->last;
@@ -725,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
idle -= L2T(cl, len);
/* true_avgidle := (1-W)*true_avgidle + W*idle,
- where W=2^{-ewma_log}. But cl->avgidle is scaled:
- cl->avgidle == true_avgidle/W,
- hence:
+ * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+ * cl->avgidle == true_avgidle/W,
+ * hence:
*/
avgidle += idle - (avgidle>>cl->ewma_log);
}
@@ -741,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
cl->avgidle = avgidle;
/* Calculate expected time, when this class
- will be allowed to send.
- It will occur, when:
- (1-W)*true_avgidle + W*delay = 0, i.e.
- idle = (1/W - 1)*(-true_avgidle)
- or
- idle = (1 - W)*(-cl->avgidle);
+ * will be allowed to send.
+ * It will occur, when:
+ * (1-W)*true_avgidle + W*delay = 0, i.e.
+ * idle = (1/W - 1)*(-true_avgidle)
+ * or
+ * idle = (1 - W)*(-cl->avgidle);
*/
idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
/*
- That is not all.
- To maintain the rate allocated to the class,
- we add to undertime virtual clock,
- necessary to complete transmitted packet.
- (len/phys_bandwidth has been already passed
- to the moment of cbq_update)
+ * That is not all.
+ * To maintain the rate allocated to the class,
+ * we add to undertime virtual clock,
+ * necessary to complete transmitted packet.
+ * (len/phys_bandwidth has been already passed
+ * to the moment of cbq_update)
*/
idle -= L2T(&q->link, len);
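
The reflowed comments above carry the whole avgidle derivation; spelling it out once: with W = 2^-ewma_log the true EWMA is true_avgidle' = (1 - W)*true_avgidle + W*idle, and since CBQ stores the scaled value avgidle = true_avgidle/W, dividing the recurrence by W gives avgidle' = avgidle + idle - (avgidle >> ewma_log), which is exactly the in-place update in this function. A one-line helper makes the equivalence concrete (sketch only, not kernel code):

/* Scaled EWMA step: avgidle' = (1 - 2^-ewma_log) * avgidle + idle */
static long avgidle_update(long avgidle, long idle, int ewma_log)
{
	return avgidle + idle - (avgidle >> ewma_log);
}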
@@ -778,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
cbq_update_toplevel(q, this, q->tx_borrowed);
}
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -794,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
do {
/* It is very suspicious place. Now overlimit
- action is generated for not bounded classes
- only if link is completely congested.
- Though it is in agree with ancestor-only paradigm,
- it looks very stupid. Particularly,
- it means that this chunk of code will either
- never be called or result in strong amplification
- of burstiness. Dangerous, silly, and, however,
- no another solution exists.
+ * action is generated for not bounded classes
+ * only if link is completely congested.
+ * Though it is in agree with ancestor-only paradigm,
+ * it looks very stupid. Particularly,
+ * it means that this chunk of code will either
+ * never be called or result in strong amplification
+ * of burstiness. Dangerous, silly, and, however,
+ * no another solution exists.
*/
- if ((cl = cl->borrow) == NULL) {
+ cl = cl->borrow;
+ if (!cl) {
this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
@@ -816,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
return cl;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
struct cbq_sched_data *q = qdisc_priv(sch);
@@ -840,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
if (cl->deficit <= 0) {
/* Class exhausted its allotment per
- this round. Switch to the next one.
+ * this round. Switch to the next one.
*/
deficit = 1;
cl->deficit += cl->quantum;
@@ -850,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-(
- It could occur even if cl->q->q.qlen != 0
- f.e. if cl->q == "tbf"
+ * It could occur even if cl->q->q.qlen != 0
+ * f.e. if cl->q == "tbf"
*/
if (skb == NULL)
goto skip_class;
@@ -880,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skip_class:
if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized.
- Unlink it from active chain.
+ * Unlink it from active chain.
*/
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
@@ -919,14 +919,14 @@ next_class:
return NULL;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- unsigned activemask;
+ unsigned int activemask;
- activemask = q->activemask&0xFF;
+ activemask = q->activemask & 0xFF;
while (activemask) {
int prio = ffz(~activemask);
activemask &= ~(1<<prio);
@@ -951,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
if (q->tx_class) {
psched_tdiff_t incr2;
/* Time integrator. We calculate EOS time
- by adding expected packet transmission time.
- If real time is greater, we warp artificial clock,
- so that:
-
- cbq_time = max(real_time, work);
+ * by adding expected packet transmission time.
+ * If real time is greater, we warp artificial clock,
+ * so that:
+ *
+ * cbq_time = max(real_time, work);
*/
incr2 = L2T(&q->link, q->tx_len);
q->now += incr2;
@@ -971,28 +971,29 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
return skb;
}
/* All the classes are overlimit.
-
- It is possible, if:
-
- 1. Scheduler is empty.
- 2. Toplevel cutoff inhibited borrowing.
- 3. Root class is overlimit.
-
- Reset 2d and 3d conditions and retry.
-
- Note, that NS and cbq-2.0 are buggy, peeking
- an arbitrary class is appropriate for ancestor-only
- sharing, but not for toplevel algorithm.
-
- Our version is better, but slower, because it requires
- two passes, but it is unavoidable with top-level sharing.
- */
+ *
+ * It is possible, if:
+ *
+ * 1. Scheduler is empty.
+ * 2. Toplevel cutoff inhibited borrowing.
+ * 3. Root class is overlimit.
+ *
+ * Reset 2d and 3d conditions and retry.
+ *
+ * Note, that NS and cbq-2.0 are buggy, peeking
+ * an arbitrary class is appropriate for ancestor-only
+ * sharing, but not for toplevel algorithm.
+ *
+ * Our version is better, but slower, because it requires
+ * two passes, but it is unavoidable with top-level sharing.
+ */
if (q->toplevel == TC_CBQ_MAXLEVEL &&
q->link.undertime == PSCHED_PASTPERFECT)
@@ -1003,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
}
/* No packets in scheduler or nobody wants to give them to us :-(
- Sigh... start watchdog timer in the last case. */
+ * Sigh... start watchdog timer in the last case.
+ */
if (sch->q.qlen) {
sch->qstats.overlimits++;
@@ -1025,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
int level = 0;
struct cbq_class *cl;
- if ((cl = this->children) != NULL) {
+ cl = this->children;
+ if (cl) {
do {
if (cl->level > level)
level = cl->level;
} while ((cl = cl->sibling) != this->children);
}
- this->level = level+1;
+ this->level = level + 1;
} while ((this = this->tparent) != NULL);
}
@@ -1047,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffer of
- arithmetic overflows!
+ * arithmetic overflows!
*/
if (cl->priority == prio) {
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio];
}
if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
- printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+ pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+ cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
}
}
@@ -1065,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
- unsigned h;
+ unsigned int h;
int i;
if (split == NULL)
return;
- for (i=0; i<=TC_PRIO_MAX; i++) {
- if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
+ if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
split->defaults[i] = NULL;
}
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
int level = split->level;
if (split->defaults[i])
@@ -1089,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
hlist_for_each_entry(c, n, &q->clhash.hash[h],
common.hnode) {
if (c->split == split && c->level < level &&
- c->defmap&(1<<i)) {
+ c->defmap & (1<<i)) {
split->defaults[i] = c;
level = c->level;
}
@@ -1103,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
struct cbq_class *split = NULL;
if (splitid == 0) {
- if ((split = cl->split) == NULL)
+ split = cl->split;
+ if (!split)
return;
splitid = split->common.classid;
}
@@ -1121,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
cl->defmap = 0;
cbq_sync_defmap(cl);
cl->split = split;
- cl->defmap = def&mask;
+ cl->defmap = def & mask;
} else
- cl->defmap = (cl->defmap&~mask)|(def&mask);
+ cl->defmap = (cl->defmap & ~mask) | (def & mask);
cbq_sync_defmap(cl);
}
@@ -1136,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
qdisc_class_hash_remove(&q->clhash, &this->common);
if (this->tparent) {
- clp=&this->sibling;
+ clp = &this->sibling;
cl = *clp;
do {
if (cl == this) {
@@ -1175,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
}
}
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
@@ -1183,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
- if ((cl_head = q->active[prio]) == NULL)
+ cl_head = q->active[prio];
+ if (!cl_head)
continue;
cl = cl_head;
@@ -1200,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
}
static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
int prio;
- unsigned h;
+ unsigned int h;
q->activemask = 0;
q->pmask = 0;
@@ -1238,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
- if (lss->change&TCF_CBQ_LSS_FLAGS) {
- cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
- cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+ if (lss->change & TCF_CBQ_LSS_FLAGS) {
+ cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+ cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
}
- if (lss->change&TCF_CBQ_LSS_EWMA)
+ if (lss->change & TCF_CBQ_LSS_EWMA)
cl->ewma_log = lss->ewma_log;
- if (lss->change&TCF_CBQ_LSS_AVPKT)
+ if (lss->change & TCF_CBQ_LSS_AVPKT)
cl->avpkt = lss->avpkt;
- if (lss->change&TCF_CBQ_LSS_MINIDLE)
+ if (lss->change & TCF_CBQ_LSS_MINIDLE)
cl->minidle = -(long)lss->minidle;
- if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+ if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
cl->maxidle = lss->maxidle;
cl->avgidle = lss->maxidle;
}
- if (lss->change&TCF_CBQ_LSS_OFFTIME)
+ if (lss->change & TCF_CBQ_LSS_OFFTIME)
cl->offtime = lss->offtime;
return 0;
}
@@ -1280,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
if (wrr->weight)
cl->weight = wrr->weight;
if (wrr->priority) {
- cl->priority = wrr->priority-1;
+ cl->priority = wrr->priority - 1;
cl->cpriority = cl->priority;
if (cl->priority >= cl->priority2)
- cl->priority2 = TC_CBQ_MAXPRIO-1;
+ cl->priority2 = TC_CBQ_MAXPRIO - 1;
}
cbq_addprio(q, cl);
@@ -1300,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
cl->overlimit = cbq_ovl_delay;
break;
case TC_CBQ_OVL_LOWPRIO:
- if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
- ovl->priority2-1 <= cl->priority)
+ if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+ ovl->priority2 - 1 <= cl->priority)
return -EINVAL;
- cl->priority2 = ovl->priority2-1;
+ cl->priority2 = ovl->priority2 - 1;
cl->overlimit = cbq_ovl_lowprio;
break;
case TC_CBQ_OVL_DROP:
@@ -1382,9 +1388,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->link.q)
q->link.q = &noop_qdisc;
- q->link.priority = TC_CBQ_MAXPRIO-1;
- q->link.priority2 = TC_CBQ_MAXPRIO-1;
- q->link.cpriority = TC_CBQ_MAXPRIO-1;
+ q->link.priority = TC_CBQ_MAXPRIO - 1;
+ q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+ q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1415,7 +1421,7 @@ put_rtab:
return err;
}
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1427,7 +1433,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_lssopt opt;
@@ -1452,15 +1458,15 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
opt.flags = 0;
opt.allot = cl->allot;
- opt.priority = cl->priority+1;
- opt.cpriority = cl->cpriority+1;
+ opt.priority = cl->priority + 1;
+ opt.cpriority = cl->cpriority + 1;
opt.weight = cl->weight;
NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
return skb->len;
@@ -1470,13 +1476,13 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt;
opt.strategy = cl->ovl_strategy;
- opt.priority2 = cl->priority2+1;
+ opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1487,7 +1493,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_fopt opt;
@@ -1506,7 +1512,7 @@ nla_put_failure:
}
#ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt;
@@ -1570,7 +1576,7 @@ static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
struct nlattr *nest;
if (cl->tparent)
@@ -1598,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->qstats.qlen = cl->q->q.qlen;
cl->xstats.avgidle = cl->avgidle;
@@ -1618,7 +1624,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue,
@@ -1641,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return 0;
}
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
return cl->q;
}
@@ -1683,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
kfree(cl);
}
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct hlist_node *n, *next;
struct cbq_class *cl;
- unsigned h;
+ unsigned int h;
#ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL;
@@ -1713,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -1736,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
{
int err;
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)*arg;
+ struct cbq_class *cl = (struct cbq_class *)*arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_CBQ_MAX + 1];
struct cbq_class *parent;
@@ -1828,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (classid) {
err = -EINVAL;
- if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+ if (TC_H_MAJ(classid ^ sch->handle) ||
+ cbq_class_lookup(q, classid))
goto failure;
} else {
int i;
- classid = TC_H_MAKE(sch->handle,0x8000);
+ classid = TC_H_MAKE(sch->handle, 0x8000);
- for (i=0; i<0x8000; i++) {
+ for (i = 0; i < 0x8000; i++) {
if (++q->hgenerator >= 0x8000)
q->hgenerator = 1;
if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1891,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->minidle = -0x7FFFFFFF;
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- if (cl->ewma_log==0)
+ if (cl->ewma_log == 0)
cl->ewma_log = q->link.ewma_log;
- if (cl->maxidle==0)
+ if (cl->maxidle == 0)
cl->maxidle = q->link.maxidle;
- if (cl->avpkt==0)
+ if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1921,7 +1926,7 @@ failure:
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
unsigned int qlen;
if (cl->filters || cl->children || cl == &q->link)
@@ -1979,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *p = (struct cbq_class*)parent;
+ struct cbq_class *p = (struct cbq_class *)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
@@ -1993,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->filters--;
}
@@ -2003,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644
index 000000000000..ee1e2090eebe
--- /dev/null
+++ b/net/sched/sch_choke.c
@@ -0,0 +1,677 @@
+/*
+ * net/sched/sch_choke.c CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+ CHOKe stateless AQM for fair bandwidth allocation
+ =================================================
+
+ CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+ unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+ maintains no flow state. The difference from RED is an additional step
+ during the enqueuing process. If average queue size is over the
+ low threshold (qmin), a packet is chosen at random from the queue.
+ If both the new and chosen packet are from the same flow, both
+ are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+ needs to access packets in queue randomly. It has a minimal class
+ interface to allow overriding the builtin flow classifier with
+ filters.
+
+ Source:
+ R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+ Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+ IEEE INFOCOM, 2000.
+
+ A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+ Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
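
A compact restatement of the enqueue decision described in the comment above, as a standalone sketch. The names are illustrative only; the real logic lives in choke_enqueue() further down in this file.

/* Illustrative sketch, not part of the patch. */
enum choke_verdict { CHOKE_ENQUEUE, CHOKE_DROP_PAIR, CHOKE_RED_ACTION };

static enum choke_verdict choke_decide(unsigned long qavg, unsigned long qmin,
				       int victim_is_same_flow)
{
	if (qavg <= qmin)
		return CHOKE_ENQUEUE;		/* small queue: admit unconditionally */
	if (victim_is_same_flow)
		return CHOKE_DROP_PAIR;		/* same flow as the random victim: drop both */
	return CHOKE_RED_ACTION;		/* otherwise fall back to RED mark/drop */
}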
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE (128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+ u32 limit;
+ unsigned char flags;
+
+ struct red_parms parms;
+
+/* Variables */
+ struct tcf_proto *filter_list;
+ struct {
+ u32 prob_drop; /* Early probability drops */
+ u32 prob_mark; /* Early probability marks */
+ u32 forced_drop; /* Forced drops, qavg > max_thresh */
+ u32 forced_mark; /* Forced marks, qavg > max_thresh */
+ u32 pdrop; /* Drops due to queue limits */
+ u32 other; /* Drops due to drop() calls */
+ u32 matched; /* Drops to flow match */
+ } stats;
+
+ unsigned int head;
+ unsigned int tail;
+
+ unsigned int tab_mask; /* size - 1 */
+
+ struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+ return reciprocal_divide(random32(), N);
+}
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+ return (q->tail - q->head) & q->tab_mask;
+}
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+ do {
+ q->head = (q->head + 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+ do {
+ q->tail = (q->tail - 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->tail] == NULL);
+}
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = q->tab[idx];
+
+ q->tab[idx] = NULL;
+
+ if (idx == q->head)
+ choke_zap_head_holes(q);
+ if (idx == q->tail)
+ choke_zap_tail_holes(q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ qdisc_tree_decrease_qlen(sch, 1);
+ --sch->q.qlen;
+}
+
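
The head/tail arithmetic above relies on the table size being a power of two, so tab_mask = size - 1 turns both wrap-around and occupancy into a single AND. A tiny standalone illustration (not kernel code):

static unsigned int ring_len(unsigned int head, unsigned int tail,
			     unsigned int mask)
{
	return (tail - head) & mask;	/* correct even after tail wraps past 0 */
}
/* e.g. with mask = 7 (size 8), head = 6, tail = 2: (2 - 6) & 7 = 4 slots in use */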
+/*
+ * Compare flow of two packets
+ * Returns true only if source and destination address and port match.
+ * false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+ struct sk_buff *skb2)
+{
+ int off1, off2, poff;
+ const u32 *ports1, *ports2;
+ u8 ip_proto;
+ __u32 hash1;
+
+ if (skb1->protocol != skb2->protocol)
+ return false;
+
+ /* Use hash value as quick check
+ * Assumes that __skb_get_rxhash makes IP header and ports linear
+ */
+ hash1 = skb_get_rxhash(skb1);
+ if (!hash1 || hash1 != skb_get_rxhash(skb2))
+ return false;
+
+ /* Probably match, but be sure to avoid hash collisions */
+ off1 = skb_network_offset(skb1);
+ off2 = skb_network_offset(skb2);
+
+ switch (skb1->protocol) {
+ case __constant_htons(ETH_P_IP): {
+ const struct iphdr *ip1, *ip2;
+
+ ip1 = (const struct iphdr *) (skb1->data + off1);
+ ip2 = (const struct iphdr *) (skb2->data + off2);
+
+ ip_proto = ip1->protocol;
+ if (ip_proto != ip2->protocol ||
+ ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+ return false;
+
+ if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+ ip_proto = 0;
+ off1 += ip1->ihl * 4;
+ off2 += ip2->ihl * 4;
+ break;
+ }
+
+ case __constant_htons(ETH_P_IPV6): {
+ const struct ipv6hdr *ip1, *ip2;
+
+ ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+ ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+ ip_proto = ip1->nexthdr;
+ if (ip_proto != ip2->nexthdr ||
+ ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+ ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+ return false;
+		off1 += 40;
+		off2 += 40;
+		break;
+	}
+
+ default: /* Maybe compare MAC header here? */
+ return false;
+ }
+
+ poff = proto_ports_offset(ip_proto);
+ if (poff < 0)
+ return true;
+
+ off1 += poff;
+ off2 += poff;
+
+ ports1 = (__force u32 *)(skb1->data + off1);
+ ports2 = (__force u32 *)(skb2->data + off2);
+ return *ports1 == *ports2;
+}
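+
+/* Note: proto_ports_offset() is 0 for TCP and UDP, so ports1/ports2 point
+ * at the 32-bit word holding both 16-bit ports, and a single word compare
+ * covers the source and destination port at once.
+ */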
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+ *(unsigned int *)(qdisc_skb_cb(skb)->data) = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+ return *(unsigned int *)(qdisc_skb_cb(skb)->data);
+}
+
+/*
+ * Classify flow using either:
+ * 1. pre-existing classification result in skb
+ * 2. fast internal classification
+ * 3. TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+ struct Qdisc *sch, int *qerr)
+
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
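+ /* fall through */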
+ case TC_ACT_SHOT:
+ return false;
+ }
+#endif
+ choke_set_classid(skb, TC_H_MIN(res.classid));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Select a packet at random from the queue.
+ * HACK: since the queue can have holes from previous deletions, retry
+ * several times to find a random skb, then just give up and return the head.
+ * Returns NULL if the queue is empty (q->head == q->tail).
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+ unsigned int *pidx)
+{
+ struct sk_buff *skb;
+ int retrys = 3;
+
+ do {
+ *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+ skb = q->tab[*pidx];
+ if (skb)
+ return skb;
+ } while (--retrys > 0);
+
+ return q->tab[*pidx = q->head];
+}
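+
+/* Example: even with half of the slots being holes, three random probes hit
+ * a queued skb with probability 1 - (1/2)^3 = 87.5%, so falling back to the
+ * head packet is rare.
+ */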
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+ struct sk_buff *nskb,
+ unsigned int *pidx)
+{
+ struct sk_buff *oskb;
+
+ if (q->head == q->tail)
+ return false;
+
+ oskb = choke_peek_random(q, pidx);
+ if (q->filter_list)
+ return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+ return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct red_parms *p = &q->parms;
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+ if (q->filter_list) {
+ /* If using external classifiers, get result and record it. */
+ if (!choke_classify(skb, sch, &ret))
+ goto other_drop; /* Packet was eaten by filter */
+ }
+
+ /* Compute average queue usage (see RED) */
+ p->qavg = red_calc_qavg(p, sch->q.qlen);
+ if (red_is_idling(p))
+ red_end_of_idle_period(p);
+
+ /* Is queue small? */
+ if (p->qavg <= p->qth_min)
+ p->qcount = -1;
+ else {
+ unsigned int idx;
+
+ /* Draw a packet at random from queue and compare flow */
+ if (choke_match_random(q, skb, &idx)) {
+ q->stats.matched++;
+ choke_drop_by_idx(sch, idx);
+ goto congestion_drop;
+ }
+
+ /* Queue is large, always mark/drop */
+ if (p->qavg > p->qth_max) {
+ p->qcount = -1;
+
+ sch->qstats.overlimits++;
+ if (use_harddrop(q) || !use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ } else if (++p->qcount) {
+ if (red_mark_probability(p, p->qavg)) {
+ p->qcount = 0;
+ p->qR = red_random(p);
+
+ sch->qstats.overlimits++;
+ if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ }
+ } else
+ p->qR = red_random(p);
+ }
+
+ /* Admit new packet */
+ if (sch->q.qlen < q->limit) {
+ q->tab[q->tail] = skb;
+ q->tail = (q->tail + 1) & q->tab_mask;
+ ++sch->q.qlen;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ q->stats.pdrop++;
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+
+ congestion_drop:
+ qdisc_drop(skb, sch);
+ return NET_XMIT_CN;
+
+ other_drop:
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+}
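+
+/* Enqueue summary: once the RED average exceeds qth_min, each arriving
+ * packet is compared with one randomly chosen queued packet; a flow match
+ * drops both packets (the CHOKe penalty), otherwise the normal RED
+ * mark/drop logic applies, and the packet is finally tail-dropped if the
+ * hard limit is reached.
+ */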
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ if (q->head == q->tail) {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ return NULL;
+ }
+
+ skb = q->tab[q->head];
+ q->tab[q->head] = NULL;
+ choke_zap_head_holes(q);
+ --sch->q.qlen;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+
+ return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ unsigned int len;
+
+ len = qdisc_queue_drop(sch);
+ if (len > 0)
+ q->stats.other++;
+ else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
+
+ return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+ [TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) },
+ [TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE },
+};
+
+
+static void choke_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CHOKE_MAX + 1];
+ const struct tc_red_qopt *ctl;
+ int err;
+ struct sk_buff **old = NULL;
+ unsigned int mask;
+
+ if (opt == NULL)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CHOKE_PARMS] == NULL ||
+ tb[TCA_CHOKE_STAB] == NULL)
+ return -EINVAL;
+
+ ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+ if (ctl->limit > CHOKE_MAX_QUEUE)
+ return -EINVAL;
+
+ mask = roundup_pow_of_two(ctl->limit + 1) - 1;
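+ /* e.g. limit = 1000 gives roundup_pow_of_two(1001) = 1024 slots, mask 1023 */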
+ if (mask != q->tab_mask) {
+ struct sk_buff **ntab;
+
+ ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+ if (!ntab)
+ ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+ if (!ntab)
+ return -ENOMEM;
+
+ sch_tree_lock(sch);
+ old = q->tab;
+ if (old) {
+ unsigned int oqlen = sch->q.qlen, tail = 0;
+
+ while (q->head != q->tail) {
+ struct sk_buff *skb = q->tab[q->head];
+
+ q->head = (q->head + 1) & q->tab_mask;
+ if (!skb)
+ continue;
+ if (tail < mask) {
+ ntab[tail++] = skb;
+ continue;
+ }
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ --sch->q.qlen;
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ q->head = 0;
+ q->tail = tail;
+ }
+
+ q->tab_mask = mask;
+ q->tab = ntab;
+ } else
+ sch_tree_lock(sch);
+
+ q->flags = ctl->flags;
+ q->limit = ctl->limit;
+
+ red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Plog, ctl->Scell_log,
+ nla_data(tb[TCA_CHOKE_STAB]));
+
+ if (q->head == q->tail)
+ red_end_of_idle_period(&q->parms);
+
+ sch_tree_unlock(sch);
+ choke_free(old);
+ return 0;
+}
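+
+/* Configuration summary: TCA_OPTIONS carries TCA_CHOKE_PARMS (a struct
+ * tc_red_qopt: limit in packets, bounded by CHOKE_MAX_QUEUE, qth_min,
+ * qth_max, Wlog, Plog, Scell_log and the TC_RED_ECN/TC_RED_HARDDROP flags)
+ * plus a TCA_CHOKE_STAB table of RED_STAB_SIZE bytes, as parsed above.
+ */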
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts = NULL;
+ struct tc_red_qopt opt = {
+ .limit = q->limit,
+ .flags = q->flags,
+ .qth_min = q->parms.qth_min >> q->parms.Wlog,
+ .qth_max = q->parms.qth_max >> q->parms.Wlog,
+ .Wlog = q->parms.Wlog,
+ .Plog = q->parms.Plog,
+ .Scell_log = q->parms.Scell_log,
+ };
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tc_choke_xstats st = {
+ .early = q->stats.prob_drop + q->stats.forced_drop,
+ .marked = q->stats.prob_mark + q->stats.forced_mark,
+ .pdrop = q->stats.pdrop,
+ .other = q->stats.other,
+ .matched = q->stats.matched,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ if (!arg->stop) {
+ if (arg->fn(sch, 1, arg) < 0) {
+ arg->stop = 1;
+ return;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+ .leaf = choke_leaf,
+ .get = choke_get,
+ .put = choke_put,
+ .tcf_chain = choke_find_tcf,
+ .bind_tcf = choke_bind,
+ .unbind_tcf = choke_put,
+ .dump = choke_dump_class,
+ .walk = choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+ .id = "choke",
+ .priv_size = sizeof(struct choke_sched_data),
+
+ .enqueue = choke_enqueue,
+ .dequeue = choke_dequeue,
+ .peek = choke_peek_head,
+ .drop = choke_drop,
+ .init = choke_init,
+ .destroy = choke_destroy,
+ .reset = choke_reset,
+ .change = choke_change,
+ .dump = choke_dump,
+ .dump_stats = choke_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+ return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+ unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e642eafc..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd4408e..2c790204d042 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
if (tb[TCA_DSMARK_VALUE])
- p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+ p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
if (tb[TCA_DSMARK_MASK])
- p->mask[*arg-1] = mask;
+ p->mask[*arg - 1] = mask;
err = 0;
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
if (!dsmark_valid_index(p, arg))
return -EINVAL;
- p->mask[arg-1] = 0xff;
- p->value[arg-1] = 0;
+ p->mask[arg - 1] = 0xff;
+ p->value[arg - 1] = 0;
return 0;
}
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (p->mask[i] == 0xff && !p->value[i])
goto ignore;
if (walker->count >= walker->skip) {
- if (walker->fn(sch, i+1, walker) < 0) {
+ if (walker->fn(sch, i + 1, walker) < 0) {
walker->stop = 1;
break;
}
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass.
*/
if (p->mask[index] != 0xff || p->value[index])
- printk(KERN_WARNING
- "dsmark_dequeue: unsupported protocol %d\n",
- ntohs(skb->protocol));
+ pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+ ntohs(skb->protocol));
break;
}
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
if (!dsmark_valid_index(p, cl))
return -EINVAL;
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
tcm->tcm_info = p->q->handle;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
- NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d6337e43c..be33f9ddf9dd 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,12 +19,11 @@
/* 1 band FIFO pseudo-"scheduler" */
-struct fifo_sched_data
-{
+struct fifo_sched_data {
u32 limit;
};
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -44,19 +43,16 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
+ __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++;
- kfree_skb(skb_head);
-
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
@@ -65,11 +61,13 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
{
struct fifo_sched_data *q = qdisc_priv(sch);
+ bool bypass;
+ bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
if (opt == NULL) {
u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
- if (sch->ops == &bfifo_qdisc_ops)
+ if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
q->limit = limit;
@@ -82,6 +80,15 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = ctl->limit;
}
+ if (is_bfifo)
+ bypass = q->limit >= psched_mtu(qdisc_dev(sch));
+ else
+ bypass = q->limit >= 1;
+
+ if (bypass)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..0da09d508737 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
kfree_skb(skb);
if (net_ratelimit())
- printk(KERN_WARNING "Dead loop on netdevice %s, "
- "fix it urgently!\n", dev_queue->dev->name);
+ pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+ dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
- printk(KERN_WARNING "BUG %s code %d qlen %d\n",
- dev->name, ret, q->q.qlen);
+ pr_warning("BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
};
-static const u8 prio2band[TC_PRIO_MAX+1] =
- { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
+};
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
return priv->q + band;
}
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
return qdisc_drop(skb, qdisc);
}
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
return NULL;
}
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
return NULL;
}
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
skb_queue_head_init(band2list(priv, prio));
+ /* Can by-pass the queue discipline */
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
@@ -540,6 +543,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
+EXPORT_SYMBOL(pfifo_fast_ops);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops)
@@ -630,7 +634,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
#ifdef CONFIG_NET_SCHED
qdisc_list_del(qdisc);
- qdisc_put_stab(qdisc->stab);
+ qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
if (ops->reset)
@@ -674,25 +678,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
return oqdisc;
}
+EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
- printk(KERN_INFO "%s: activation failed\n", dev->name);
+ netdev_info(dev, "activation failed\n");
return;
}
-
- /* Can by-pass the queue discipline for default qdisc */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- } else {
- qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
@@ -761,6 +761,7 @@ void dev_activate(struct net_device *dev)
dev_watchdog_up(dev);
}
}
+EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
@@ -840,6 +841,7 @@ void dev_deactivate(struct net_device *dev)
list_add(&dev->unreg_list, &single);
dev_deactivate_many(&single);
}
+EXPORT_SYMBOL(dev_deactivate);
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 51dcc2aa5c92..b9493a09a870 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -32,8 +32,7 @@
struct gred_sched_data;
struct gred_sched;
-struct gred_sched_data
-{
+struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop pramaters */
u32 bytesin; /* bytes seen on virtualQ so far*/
@@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE,
};
-struct gred_sched
-{
+struct gred_sched {
struct gred_sched_data *tab[MAX_DPs];
unsigned long flags;
u32 red_flags;
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
return t->red_flags & TC_RED_HARDDROP;
}
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched_data *q = NULL;
+ struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb);
- if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def;
- if ((q = t->tab[dp]) == NULL) {
+ q = t->tab[dp];
+ if (!q) {
/* Pass through packets not assigned to a DP
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->parms))
- qavg +=t->tab[i]->parms.qavg;
+ qavg += t->tab[i]->parms.qavg;
}
}
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
gred_store_wred_set(t, q);
switch (red_action(&q->parms, q->parms.qavg + qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+ q->stats.forced_mark++;
+ break;
}
if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x after dequeue, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "after dequeue, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
return NULL;
}
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x while dropping, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "while dropping, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
}
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
{
int i;
struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
- printk(KERN_WARNING "GRED: Warning: Destroying "
- "shadowed VQ 0x%x\n", i);
+ pr_warning("GRED: Warning: Destroying "
+ "shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791d4f6c..6488e6425652 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures.
*/
-struct internal_sc
-{
+struct internal_sc {
u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
};
/* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */
};
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
HFSC_RSC = 0x1,
HFSC_FSC = 0x2,
HFSC_USC = 0x4
};
-struct hfsc_class
-{
+struct hfsc_class {
struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */
@@ -140,8 +136,8 @@ struct hfsc_class
u64 cl_cumul; /* cumulative work in bytes done by
real-time criteria */
- u64 cl_d; /* deadline*/
- u64 cl_e; /* eligible time */
+ u64 cl_d; /* deadline*/
+ u64 cl_e; /* eligible time */
u64 cl_vt; /* virtual time */
u64 cl_f; /* time when this class will fit for
link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */
};
-struct hfsc_sched
-{
+struct hfsc_sched {
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (go_active) {
n = rb_last(&cl->cl_parent->vt_tree);
if (n != NULL) {
- max_cl = rb_entry(n, struct hfsc_class,vt_node);
+ max_cl = rb_entry(n, struct hfsc_class, vt_node);
/*
* set vt to the average of the min and max
* classes. if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL;
}
#endif
- if ((cl = (struct hfsc_class *)res.class) == NULL) {
- if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+ cl = (struct hfsc_class *)res.class;
+ if (!cl) {
+ cl = hfsc_find_class(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
if (cl->level >= head->level)
break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1;
}
-static inline int
+static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl;
u64 next_time = 0;
- if ((cl = eltree_get_minel(q)) != NULL)
+ cl = eltree_get_minel(q);
+ if (cl)
next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1600,7 +1598,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1626,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among
* the eligible classes.
*/
- if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+ cl = eltree_get_mindl(q, cur_time);
+ if (cl) {
realtime = 1;
} else {
/*
@@ -1665,7 +1663,8 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0c6836..e1429a85091f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@ struct htb_class {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
/* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
+ * parent's feed then we lost ptr value and start from the
+ * first child again. Here we store classid of the
+ * last valid ptr (used when ptr is NULL).
+ */
u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
* have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
* then finish and return direct queue.
*/
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int result;
/* allow to select class by setting skb->priority to valid classid;
- note that nfmark can be used too by attaching filter fw with no
- rules in it */
+ * note that nfmark can be used too by attaching filter fw with no
+ * rules in it
+ */
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
- if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+ cl = htb_find(skb->priority, sch);
+ if (cl && cl->level == 0)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
return NULL;
}
#endif
- if ((cl = (void *)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */
- if ((cl = htb_find(res.classid, sch)) == NULL)
+ cl = htb_find(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
}
if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.feed[prio].rb_node)
/* parent already has its feed in use so that
- reset bit in mask as parent is already ok */
+ * reset bit in mask as parent is already ok
+ */
mask &= ~(1 << prio);
htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing child which is pointed to from
- parent feed - forget the pointer but remember
- classid */
+ * parent feed - forget the pointer but remember
+ * classid
+ */
p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL;
}
@@ -574,7 +581,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -664,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
- 1 to simplify things when jiffy is going to be incremented
- too soon */
+ * 1 to simplify things when jiffy is going to be incremented
+ * too soon
+ */
unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
@@ -688,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
- printk(KERN_WARNING "htb: too many events!\n");
+ pr_warning("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
@@ -696,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
}
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
- is no such one exists. */
+ * is no such one exists.
+ */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id)
{
@@ -740,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
- the original or next ptr */
+ * the original or next ptr
+ */
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
+ * can become out of date quickly
+ */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
@@ -773,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
}
/* dequeues packet at given priority and level; call only if
- you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level)
{
@@ -790,9 +801,10 @@ next:
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
- qdisc drops packets in enqueue routine or if someone used
- graft operation on the leaf since last dequeue;
- simply deactivate and skip such class */
+ * qdisc drops packets in enqueue routine or if someone used
+ * graft operation on the leaf since last dequeue;
+ * simply deactivate and skip such class
+ */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
@@ -832,7 +844,8 @@ next:
ptr[0]) + prio);
}
/* this used to be after charge_class but this constelation
- gives us slightly better performance */
+ * gives us slightly better performance
+ */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
@@ -842,7 +855,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -851,7 +864,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
- sch->flags &= ~TCQ_F_THROTTLED;
+ok:
+ qdisc_bstats_update(sch, skb);
+ qdisc_unthrottled(sch);
sch->q.qlen--;
return skb;
}
@@ -882,13 +897,11 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
+
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
@@ -989,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
return err;
if (tb[TCA_HTB_INIT] == NULL) {
- printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+ pr_err("HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) {
- printk(KERN_ERR
- "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+ pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL;
}
@@ -1208,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below
- and surprisingly it worked in 2.4. But it must precede it
- because filter need its target class alive to be able to call
- unbind_filter on it (without Oops). */
+ * and surprisingly it worked in 2.4. But it must precede it
+ * because filter need its target class alive to be able to call
+ * unbind_filter on it (without Oops).
+ */
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1344,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
- printk(KERN_ERR "htb: tree is too deep\n");
+ pr_err("htb: tree is too deep\n");
goto failure;
}
err = -ENOBUFS;
- if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
goto failure;
err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1368,8 +1382,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
- so that can't be used inside of sch_tree_lock
- -- thanks to Karlis Peisenieks */
+ * so that can't be used inside of sch_tree_lock
+ * -- thanks to Karlis Peisenieks
+ */
new_q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
sch_tree_lock(sch);
@@ -1421,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
/* it used to be a nasty bug here, we have to check that node
- is really leaf before changing cl->un.leaf ! */
+ * is really leaf before changing cl->un.leaf !
+ */
if (!cl->level) {
cl->quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->quantum < 1000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is big. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 200000;
@@ -1480,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
- The line above used to be there to prevent attaching filters to
- leaves. But at least tc_index filter uses this just to get class
- for other reasons so that we have to allow for it.
- ----
- 19.6.2002 As Werner explained it is ok - bind filter is just
- another way to "lock" the class - unlike "get" this lock can
- be broken by class during destroy IIUC.
+ * The line above used to be there to prevent attaching filters to
+ * leaves. But at least tc_index filter uses this just to get class
+ * for other reasons so that we have to allow for it.
+ * ----
+ * 19.6.2002 As Werner explained it is ok - bind filter is just
+ * another way to "lock" the class - unlike "get" this lock can
+ * be broken by class during destroy IIUC.
*/
if (cl)
cl->filter_cnt++;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ecc302f4d2a1..ec5cbc848963 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -61,7 +61,6 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
goto err;
- qdisc->flags |= TCQ_F_CAN_BYPASS;
priv->qdiscs[ntx] = qdisc;
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644
index 000000000000..effd4ee0e880
--- /dev/null
+++ b/net/sched/sch_mqprio.c
@@ -0,0 +1,416 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+ struct Qdisc **qdiscs;
+ int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned int ntx;
+
+ if (!priv->qdiscs)
+ return;
+
+ for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
+ qdisc_destroy(priv->qdiscs[ntx]);
+
+ if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+ dev->netdev_ops->ndo_setup_tc(dev, 0);
+ else
+ netdev_set_num_tc(dev, 0);
+
+ kfree(priv->qdiscs);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+ int i, j;
+
+ /* Verify num_tc is not out of max range */
+ if (qopt->num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ /* Verify priority mapping uses valid tcs */
+ for (i = 0; i < TC_BITMASK + 1; i++) {
+ if (qopt->prio_tc_map[i] >= qopt->num_tc)
+ return -EINVAL;
+ }
+
+ /* net_device does not support requested operation */
+ if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+ return -EINVAL;
+
+ /* If hardware owns the mapping, qcount and qoffset are taken from
+ * the LLD, so there is no reason to verify them here.
+ */
+ if (qopt->hw)
+ return 0;
+
+ for (i = 0; i < qopt->num_tc; i++) {
+ unsigned int last = qopt->offset[i] + qopt->count[i];
+
+ /* Verify the queue count is within the tx queue range; a last value
+ * equal to real_num_tx_queues indicates the final queue is in use.
+ */
+ if (qopt->offset[i] >= dev->real_num_tx_queues ||
+ !qopt->count[i] ||
+ last > dev->real_num_tx_queues)
+ return -EINVAL;
+
+ /* Verify that the offset and counts do not overlap */
+ for (j = i + 1; j < qopt->num_tc; j++) {
+ if (last > qopt->offset[j])
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
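+
+/* Example passing the checks above on a 4-queue device: num_tc = 2,
+ * count = {2, 2}, offset = {0, 2}, so TC0 covers queues 0-1 and TC1 covers
+ * queues 2-3, with every prio_tc_map[] entry set to 0 or 1.
+ */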
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+ int i, err = -EOPNOTSUPP;
+ struct tc_mqprio_qopt *qopt = NULL;
+
+ BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+ BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ if (nla_len(opt) < sizeof(*qopt))
+ return -EINVAL;
+
+ qopt = nla_data(opt);
+ if (mqprio_parse_opt(dev, qopt))
+ return -EINVAL;
+
+ /* pre-allocate qdiscs so that attachment can't fail */
+ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ GFP_KERNEL);
+ if (priv->qdiscs == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ dev_queue = netdev_get_tx_queue(dev, i);
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1)));
+ if (qdisc == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ priv->qdiscs[i] = qdisc;
+ }
+
+ /* If the mqprio options indicate that hardware should own the queue
+ * mapping, then run ndo_setup_tc; otherwise use the supplied and
+ * verified mapping.
+ */
+ if (qopt->hw) {
+ priv->hw_owned = 1;
+ err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+ if (err)
+ goto err;
+ } else {
+ netdev_set_num_tc(dev, qopt->num_tc);
+ for (i = 0; i < qopt->num_tc; i++)
+ netdev_set_tc_queue(dev, i,
+ qopt->count[i], qopt->offset[i]);
+ }
+
+ /* Always use supplied priority mappings */
+ for (i = 0; i < TC_BITMASK + 1; i++)
+ netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+ sch->flags |= TCQ_F_MQROOT;
+ return 0;
+
+err:
+ mqprio_destroy(sch);
+ return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ /* Attach underlying qdisc */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = priv->qdiscs[ntx];
+ qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (qdisc)
+ qdisc_destroy(qdisc);
+ }
+ kfree(priv->qdiscs);
+ priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+ unsigned long cl)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+ if (ntx >= dev->num_tx_queues)
+ return NULL;
+ return netdev_get_tx_queue(dev, ntx);
+}
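+
+/* Class numbering: IDs 1..num_tc address the traffic classes themselves,
+ * while IDs above num_tc address individual tx queues, e.g. with num_tc = 4
+ * the class ID 5 resolves to tx queue 0 (5 - 1 - 4).
+ */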
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return -EINVAL;
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ *old = dev_graft_qdisc(dev_queue, new);
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+
+ return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_mqprio_qopt opt = { 0 };
+ struct Qdisc *qdisc;
+ unsigned int i;
+
+ sch->q.qlen = 0;
+ memset(&sch->bstats, 0, sizeof(sch->bstats));
+ memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.qlen += qdisc->qstats.qlen;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+
+ opt.num_tc = netdev_get_num_tc(dev);
+ memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+ opt.hw = priv->hw_owned;
+
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ opt.count[i] = dev->tc_to_txq[i].count;
+ opt.offset[i] = dev->tc_to_txq[i].offset;
+ }
+
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return NULL;
+
+ return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx = TC_H_MIN(classid);
+
+ if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+ return 0;
+ return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_info = 0;
+ } else {
+ int i;
+ struct netdev_queue *dev_queue;
+
+ dev_queue = mqprio_queue_get(sch, cl);
+ tcm->tcm_parent = 0;
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ struct netdev_tc_txq tc = dev->tc_to_txq[i];
+ int q_idx = cl - netdev_get_num_tc(dev);
+
+ if (q_idx > tc.offset &&
+ q_idx <= tc.offset + tc.count) {
+ tcm->tcm_parent =
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1));
+ break;
+ }
+ }
+ tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ }
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ int i;
+ struct Qdisc *qdisc;
+ struct gnet_stats_queue qstats = {0};
+ struct gnet_stats_basic_packed bstats = {0};
+ struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+ /* Drop the lock here; it will be reclaimed before touching
+ * statistics. This is required because the d->lock we hold here
+ * is the lock on dev_queue->qdisc_sleeping, which is also
+ * acquired below.
+ */
+ spin_unlock_bh(d->lock);
+
+ for (i = tc.offset; i < tc.offset + tc.count; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ bstats.bytes += qdisc->bstats.bytes;
+ bstats.packets += qdisc->bstats.packets;
+ qstats.qlen += qdisc->qstats.qlen;
+ qstats.backlog += qdisc->qstats.backlog;
+ qstats.drops += qdisc->qstats.drops;
+ qstats.requeues += qdisc->qstats.requeues;
+ qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ /* Reclaim root sleeping lock before completing stats */
+ spin_lock_bh(d->lock);
+ if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+ gnet_stats_copy_queue(d, &qstats) < 0)
+ return -1;
+ } else {
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ sch = dev_queue->qdisc_sleeping;
+ sch->qstats.qlen = sch->q.qlen;
+ if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx;
+
+ if (arg->stop)
+ return;
+
+ /* Walk hierarchy with a virtual class per tc */
+ arg->count = arg->skip;
+ for (ntx = arg->skip;
+ ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+ ntx++) {
+ if (arg->fn(sch, ntx + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+ .graft = mqprio_graft,
+ .leaf = mqprio_leaf,
+ .get = mqprio_get,
+ .put = mqprio_put,
+ .walk = mqprio_walk,
+ .dump = mqprio_dump_class,
+ .dump_stats = mqprio_dump_class_stats,
+};
+
+struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+ .cl_ops = &mqprio_class_ops,
+ .id = "mqprio",
+ .priv_size = sizeof(struct mqprio_sched),
+ .init = mqprio_init,
+ .destroy = mqprio_destroy,
+ .attach = mqprio_attach,
+ .dump = mqprio_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+ return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+ unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
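+
+/*
+ * Usage sketch (hypothetical command line, assuming an iproute2 front end
+ * with the usual num_tc/map/queues/hw keywords for this qdisc):
+ *
+ *   tc qdisc add dev eth0 root mqprio num_tc 4 \
+ *      map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 \
+ *      queues 4@0 4@4 4@8 4@12 hw 0
+ *
+ * which fills tc_mqprio_qopt with num_tc = 4, a 16-entry prio_tc_map and
+ * count/offset pairs of 4@0 .. 4@12, leaving the mapping in software.
+ */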
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da24763..edc1950e0e77 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
unsigned int len;
struct Qdisc *qdisc;
- for (band = q->bands-1; band >= 0; band--) {
+ for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band];
if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- err = multiq_tune(sch,opt);
+ err = multiq_tune(sch, opt);
if (err)
kfree(q->queues);
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl);
- tcm->tcm_info = q->queues[cl-1]->handle;
+ tcm->tcm_info = q->queues[cl - 1]->handle;
return 0;
}
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, band+1, arg) < 0) {
+ if (arg->fn(sch, band + 1, arg) < 0) {
arg->stop = 1;
break;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce863479..64f0d3293b49 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
cb = netem_skb_cb(skb);
- if (q->gap == 0 || /* not doing reordering */
- q->counter < q->gap || /* inside last reordering gap */
+ if (q->gap == 0 || /* not doing reordering */
+ q->counter < q->gap || /* inside last reordering gap */
q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
psched_tdiff_t delay;
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -249,7 +248,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -266,7 +265,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- if (sch->flags & TCQ_F_THROTTLED)
+ if (qdisc_is_throttled(sch))
return NULL;
skb = q->qdisc->ops->peek(q->qdisc);
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d49dd1..2a318f2dc3e5 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -22,8 +22,7 @@
#include <net/pkt_sched.h>
-struct prio_sched_data
-{
+struct prio_sched_data {
int bands;
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band))
band = 0;
- return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+ return q->queues[q->prio2band[band & TC_PRIO_MAX]];
}
band = res.classid;
}
@@ -84,7 +83,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -107,7 +105,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
return NULL;
}
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -116,6 +114,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
}
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
}
static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
return -EINVAL;
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
if (qopt->priomap[i] >= qopt->bands)
return -EINVAL;
}
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_unlock(sch);
- for (i=0; i<q->bands; i++) {
+ for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old;
+
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch);
int i;
- for (i=0; i<TCQ_PRIO_BANDS; i++)
+ for (i = 0; i < TCQ_PRIO_BANDS; i++)
q->queues[i] = &noop_qdisc;
if (opt == NULL) {
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
} else {
int err;
- if ((err= prio_tune(sch, opt)) != 0)
+ if ((err = prio_tune(sch, opt)) != 0)
return err;
}
return 0;
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt;
opt.bands = q->bands;
- memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, prio+1, arg) < 0) {
+ if (arg->fn(sch, prio + 1, arg) < 0) {
arg->stop = 1;
break;
}
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5a2c97..6649463da1b6 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -36,8 +36,7 @@
if RED works correctly.
*/
-struct red_sched_data
-{
+struct red_sched_data {
u32 limit; /* HARD maximal queue length */
unsigned char flags;
struct red_parms parms;
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP;
}
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -67,34 +66,33 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
red_end_of_idle_period(&q->parms);
switch (red_action(&q->parms, q->parms.qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (red_use_harddrop(q) || !red_use_ecn(q) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
-
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (red_use_harddrop(q) || !red_use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ break;
}
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -107,22 +105,24 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -130,7 +130,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
return child->ops->peek(child);
}
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -149,7 +149,7 @@ static unsigned int red_drop(struct Qdisc* sch)
return 0;
}
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
@@ -216,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53a634d..c2e628dfaacc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
#define SFQ_DEPTH 128 /* max number of packets per flow */
#define SFQ_SLOTS 128 /* max number of flows */
#define SFQ_EMPTY_SLOT 255
-#define SFQ_HASH_DIVISOR 1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
/* We use 16 bits to store allot, and want to handle packets up to 64K
* Scale allot by 8 (1<<3) so that no overflow occurs.
*/
@@ -92,8 +94,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array
*/
-struct sfq_head
-{
+struct sfq_head {
sfq_index next;
sfq_index prev;
};
@@ -108,13 +109,12 @@ struct sfq_slot {
short allot; /* credit for this slot */
};
-struct sfq_sched_data
-{
+struct sfq_sched_data {
/* Parameters */
int perturb_period;
- unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit;
-
+ unsigned int divisor; /* number of slots in hash table */
/* Variables */
struct tcf_proto *filter_list;
struct timer_list perturb_timer;
@@ -122,7 +122,7 @@ struct sfq_sched_data
sfq_index cur_depth; /* depth of longest slot */
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct sfq_slot *tail; /* current slot in round */
- sfq_index ht[SFQ_HASH_DIVISOR]; /* Hash table */
+ sfq_index *ht; /* Hash table (divisor slots) */
struct sfq_slot slots[SFQ_SLOTS];
struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
};
@@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS];
}
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
- return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+ return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
u32 h, h2;
@@ -157,13 +157,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
iph = ip_hdr(skb);
- h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+ h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
}
break;
}
@@ -181,7 +181,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (poff >= 0 &&
pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
iph = ipv6_hdr(skb);
- h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+ h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
}
break;
}
@@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+ TC_H_MIN(skb->priority) <= q->divisor)
return TC_H_MIN(skb->priority);
if (!q->filter_list)
@@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return 0;
}
#endif
- if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+ if (TC_H_MIN(res.classid) <= q->divisor)
return TC_H_MIN(res.classid);
}
return 0;
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tail = slot;
slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- qdisc_bstats_update(sch, skb);
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
}
skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
@@ -492,13 +491,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
+ if (ctl->divisor &&
+ (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+ return -EINVAL;
+
sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = ctl->perturb_period * HZ;
if (ctl->limit)
q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+ if (ctl->divisor)
+ q->divisor = ctl->divisor;
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
sfq_drop(sch);
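sfq_change() now accepts a user-supplied hash divisor but requires a power of two no larger than 65536, because sfq_fold_hash() masks the hash with divisor - 1. The same constraint as a standalone helper (hypothetical name, kernel context assumed):

#include <linux/types.h>
#include <linux/log2.h>		/* is_power_of_2() */

/* True if "divisor" can safely be used as an SFQ-style hash mask base. */
static bool divisor_is_valid(unsigned int divisor)
{
	if (!divisor)
		return true;	/* 0 means "keep the current divisor" in the hunk above */
	return is_power_of_2(divisor) && divisor <= 65536;
}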
@@ -516,15 +520,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
+ size_t sz;
int i;
q->perturb_timer.function = sfq_perturbation;
q->perturb_timer.data = (unsigned long)sch;
init_timer_deferrable(&q->perturb_timer);
- for (i = 0; i < SFQ_HASH_DIVISOR; i++)
- q->ht[i] = SFQ_EMPTY_SLOT;
-
for (i = 0; i < SFQ_DEPTH; i++) {
q->dep[i].next = i + SFQ_SLOTS;
q->dep[i].prev = i + SFQ_SLOTS;
@@ -533,6 +535,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = SFQ_DEPTH - 1;
q->cur_depth = 0;
q->tail = NULL;
+ q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
if (opt == NULL) {
q->quantum = psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -544,10 +547,23 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
return err;
}
+ sz = sizeof(q->ht[0]) * q->divisor;
+ q->ht = kmalloc(sz, GFP_KERNEL);
+ if (!q->ht && sz > PAGE_SIZE)
+ q->ht = vmalloc(sz);
+ if (!q->ht)
+ return -ENOMEM;
+ for (i = 0; i < q->divisor; i++)
+ q->ht[i] = SFQ_EMPTY_SLOT;
+
for (i = 0; i < SFQ_SLOTS; i++) {
slot_queue_init(&q->slots[i]);
sfq_link(q, i);
}
+ if (q->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -558,6 +574,10 @@ static void sfq_destroy(struct Qdisc *sch)
tcf_destroy_chain(&q->filter_list);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
+ if (is_vmalloc_addr(q->ht))
+ vfree(q->ht);
+ else
+ kfree(q->ht);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
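Because the hash table is now divisor slots long, sfq_init() allocates it with kmalloc() and falls back to vmalloc() for large tables, and sfq_destroy() picks the matching free routine via is_vmalloc_addr(). The same alloc/free pairing in isolation (hypothetical helper names; error handling reduced to a NULL return):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>		/* is_vmalloc_addr(), PAGE_SIZE */

static void *ht_alloc(size_t sz)
{
	void *p = kmalloc(sz, GFP_KERNEL);

	if (!p && sz > PAGE_SIZE)	/* large table: retry with vmalloc */
		p = vmalloc(sz);
	return p;			/* NULL if both allocations failed */
}

static void ht_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}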
@@ -570,7 +590,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.perturb_period = q->perturb_period / HZ;
opt.limit = q->limit;
- opt.divisor = SFQ_HASH_DIVISOR;
+ opt.divisor = q->divisor;
opt.flows = q->limit;
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -595,6 +615,8 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -648,7 +670,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
if (arg->stop)
return;
- for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+ for (i = 0; i < q->divisor; i++) {
if (q->ht[i] == SFQ_EMPTY_SLOT ||
arg->count < arg->skip) {
arg->count++;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e721811..1dcfb5223a86 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -97,8 +97,7 @@
changed the limit is not effective anymore.
*/
-struct tbf_sched_data
-{
+struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@ struct tbf_sched_data
struct qdisc_watchdog watchdog; /* Watchdog timer */
};
-#define L2T(q,L) qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
@@ -134,11 +133,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -150,7 +148,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
return len;
}
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
@@ -186,7 +184,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->tokens = toks;
q->ptokens = ptoks;
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
+ qdisc_bstats_update(sch, skb);
return skb;
}
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return NULL;
}
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
int err;
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
struct qdisc_rate_table *rtab = NULL;
struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
- int max_size,n;
+ int max_size, n;
err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
if (err < 0)
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
}
for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer) break;
- max_size = (n << qopt->rate.cell_log)-1;
+ if (rtab->data[n] > qopt->buffer)
+ break;
+ max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) {
int size;
for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu) break;
- size = (n << qopt->peakrate.cell_log)-1;
- if (size < max_size) max_size = size;
+ if (ptab->data[n] > qopt->mtu)
+ break;
+ size = (n << qopt->peakrate.cell_log) - 1;
+ if (size < max_size)
+ max_size = size;
}
if (max_size < 0)
goto done;
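The reformatted loops above compute TBF's max_size: walk the rate table until a slot's transmission cost exceeds the configured buffer (and, when a peak rate is set, the MTU), then convert that slot index back to a byte count via the cell log. The calculation as a standalone sketch, with a plain u32 array standing in for qdisc_rate_table::data:

/* Largest packet size (bytes) whose cost in data[] still fits in "budget";
 * data[n] is the cost of a packet of size n << cell_log.
 */
static int tbf_max_size(const u32 *data, u32 budget, int cell_log)
{
	int n;

	for (n = 0; n < 256; n++)
		if (data[n] > budget)
			break;
	return (n << cell_log) - 1;	/* negative if even the first cell is too costly */
}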
@@ -310,7 +312,7 @@ done:
return err;
}
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
.graft = tbf_graft,
.leaf = tbf_leaf,
.get = tbf_get,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 84ce48eadff4..45cd30098e34 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -53,8 +53,7 @@
which will not break load balancing, though native slave
traffic will have the highest priority. */
-struct teql_master
-{
+struct teql_master {
struct Qdisc_ops qops;
struct net_device *dev;
struct Qdisc *slaves;
@@ -65,29 +64,27 @@ struct teql_master
unsigned long tx_dropped;
};
-struct teql_sched_data
-{
+struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
struct neighbour *ncache;
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
/* "teql*" qdisc routines */
static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -97,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
@@ -111,19 +108,21 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
}
static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
{
/* teql is meant to be used as root qdisc */
return NULL;
}
-static __inline__ void
+static inline void
teql_neigh_release(struct neighbour *n)
{
if (n)
@@ -131,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
}
static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
@@ -141,13 +140,14 @@ teql_reset(struct Qdisc* sch)
}
static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
{
struct Qdisc *q, *prev;
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
- if ((prev = master->slaves) != NULL) {
+ prev = master->slaves;
+ if (prev) {
do {
q = NEXT_SLAVE(prev);
if (q == sch) {
@@ -179,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct net_device *dev = qdisc_dev(sch);
- struct teql_master *m = (struct teql_master*)sch->ops;
+ struct teql_master *m = (struct teql_master *)sch->ops;
struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len)
@@ -290,7 +290,8 @@ restart:
nores = 0;
busy = 0;
- if ((q = start) == NULL)
+ q = start;
+ if (!q)
goto drop;
do {
@@ -355,10 +356,10 @@ drop:
static int teql_master_open(struct net_device *dev)
{
- struct Qdisc * q;
+ struct Qdisc *q;
struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE;
- unsigned flags = IFF_NOARP|IFF_MULTICAST;
+ unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL)
return -EUNATCH;
@@ -426,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
do {
if (new_mtu > qdisc_dev(q)->mtu)
return -EINVAL;
- } while ((q=NEXT_SLAVE(q)) != m->slaves);
+ } while ((q = NEXT_SLAVE(q)) != m->slaves);
}
dev->mtu = new_mtu;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd25f50..8e02550ff3e8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3428,7 +3428,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
@@ -5333,7 +5333,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_getsockopt_delayed_ack(sk, len, optval,
optlen);
break;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..d8d98d5b508c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1475,6 +1475,12 @@ restart:
goto out_free;
}
+ if (sk_filter(other, skb) < 0) {
+ /* Toss the packet but do not return any error to the sender */
+ err = len;
+ goto out_free;
+ }
+
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
@@ -1978,36 +1984,38 @@ static int unix_shutdown(struct socket *sock, int mode)
mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
- if (mode) {
- unix_state_lock(sk);
- sk->sk_shutdown |= mode;
- other = unix_peer(sk);
- if (other)
- sock_hold(other);
- unix_state_unlock(sk);
- sk->sk_state_change(sk);
-
- if (other &&
- (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
- int peer_mode = 0;
-
- if (mode&RCV_SHUTDOWN)
- peer_mode |= SEND_SHUTDOWN;
- if (mode&SEND_SHUTDOWN)
- peer_mode |= RCV_SHUTDOWN;
- unix_state_lock(other);
- other->sk_shutdown |= peer_mode;
- unix_state_unlock(other);
- other->sk_state_change(other);
- if (peer_mode == SHUTDOWN_MASK)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
- else if (peer_mode & RCV_SHUTDOWN)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- }
- if (other)
- sock_put(other);
+ if (!mode)
+ return 0;
+
+ unix_state_lock(sk);
+ sk->sk_shutdown |= mode;
+ other = unix_peer(sk);
+ if (other)
+ sock_hold(other);
+ unix_state_unlock(sk);
+ sk->sk_state_change(sk);
+
+ if (other &&
+ (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+ int peer_mode = 0;
+
+ if (mode&RCV_SHUTDOWN)
+ peer_mode |= SEND_SHUTDOWN;
+ if (mode&SEND_SHUTDOWN)
+ peer_mode |= RCV_SHUTDOWN;
+ unix_state_lock(other);
+ other->sk_shutdown |= peer_mode;
+ unix_state_unlock(other);
+ other->sk_state_change(other);
+ if (peer_mode == SHUTDOWN_MASK)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+ else if (peer_mode & RCV_SHUTDOWN)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
+ if (other)
+ sock_put(other);
+
return 0;
}
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 74944a2dd436..788a12c1eb5d 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -59,8 +59,6 @@
#include <asm/uaccess.h> /* copy_to/from_user */
#include <linux/init.h> /* __initfunc et al. */
-#define KMEM_SAFETYZONE 8
-
#define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev)))
/*
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee29063e5d..1f1ef70f34f2 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@ config CFG80211_DEBUGFS
If unsure, say N.
config CFG80211_INTERNAL_REGDB
- bool "use statically compiled regulatory rules database" if EMBEDDED
+ bool "use statically compiled regulatory rules database" if EXPERT
default n
depends on CFG80211
---help---
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5e1e0b08890..61291965c5f6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2189,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
- (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ (nlh->nlmsg_flags & NLM_F_DUMP)) {
if (link->dump == NULL)
return -EINVAL;
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 6c941050f573..1bf090a885fe 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -13,8 +13,8 @@ obj-y := \
request_key_auth.o \
user_defined.o
-obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 792c0a611a6d..07a5f35e3970 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -1,4 +1,4 @@
-/* compat.c: 32-bit compatibility syscall for 64-bit systems
+/* 32-bit compatibility syscall for 64-bit systems
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -14,13 +14,13 @@
#include <linux/compat.h>
#include "internal.h"
-/*****************************************************************************/
/*
- * the key control system call, 32-bit compatibility version for 64-bit archs
- * - this should only be called if the 64-bit arch uses weird pointers in
- * 32-bit mode or doesn't guarantee that the top 32-bits of the argument
- * registers on taking a 32-bit syscall are zero
- * - if you can, you should call sys_keyctl directly
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ *
+ * This should only be called if the 64-bit arch uses weird pointers in 32-bit
+ * mode or doesn't guarantee that the top 32-bits of the argument registers on
+ * taking a 32-bit syscall are zero. If you can, you should call sys_keyctl()
+ * directly.
*/
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5)
@@ -88,5 +88,4 @@ asmlinkage long compat_sys_keyctl(u32 option,
default:
return -EOPNOTSUPP;
}
-
-} /* end compat_sys_keyctl() */
+}
diff --git a/security/keys/encrypted_defined.c b/security/keys/encrypted.c
index 32d27c858388..9e7e4ce3fae8 100644
--- a/security/keys/encrypted_defined.c
+++ b/security/keys/encrypted.c
@@ -30,7 +30,7 @@
#include <crypto/sha.h>
#include <crypto/aes.h>
-#include "encrypted_defined.h"
+#include "encrypted.h"
static const char KEY_TRUSTED_PREFIX[] = "trusted:";
static const char KEY_USER_PREFIX[] = "user:";
diff --git a/security/keys/encrypted_defined.h b/security/keys/encrypted.h
index cef5e2f2b7d1..cef5e2f2b7d1 100644
--- a/security/keys/encrypted_defined.h
+++ b/security/keys/encrypted.h
diff --git a/security/keys/gc.c b/security/keys/gc.c
index a46e825cbf02..89df6b5f203c 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -32,8 +32,8 @@ static time_t key_gc_next_run = LONG_MAX;
static time_t key_gc_new_timer;
/*
- * Schedule a garbage collection run
- * - precision isn't particularly important
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
*/
void key_schedule_gc(time_t gc_at)
{
@@ -61,8 +61,9 @@ static void key_gc_timer_func(unsigned long data)
}
/*
- * Garbage collect pointers from a keyring
- * - return true if we altered the keyring
+ * Garbage collect pointers from a keyring.
+ *
+ * Return true if we altered the keyring.
*/
static bool key_gc_keyring(struct key *keyring, time_t limit)
__releases(key_serial_lock)
@@ -107,9 +108,8 @@ do_gc:
}
/*
- * Garbage collector for keys
- * - this involves scanning the keyrings for dead, expired and revoked keys
- * that have overstayed their welcome
+ * Garbage collector for keys. This involves scanning the keyrings for dead,
+ * expired and revoked keys that have overstayed their welcome
*/
static void key_garbage_collector(struct work_struct *work)
{
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 56a133d8f37d..edfa50dbd6f5 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -1,4 +1,4 @@
-/* internal.h: authentication token and access key management internal defs
+/* Authentication token and access key management internal defs
*
* Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -35,10 +35,12 @@ extern struct key_type key_type_user;
/*****************************************************************************/
/*
- * keep track of keys for a user
- * - this needs to be separate to user_struct to avoid a refcount-loop
- * (user_struct pins some keyrings which pin this struct)
- * - this also keeps track of keys under request from userspace for this UID
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate to user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
*/
struct key_user {
struct rb_node node;
@@ -62,7 +64,7 @@ extern struct key_user *key_user_lookup(uid_t uid,
extern void key_user_put(struct key_user *user);
/*
- * key quota limits
+ * Key quota limits.
* - root has its own separate limits to everyone else
*/
extern unsigned key_quota_root_maxkeys;
@@ -146,13 +148,13 @@ extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
extern void key_schedule_gc(time_t expiry_at);
-/*
- * check to see whether permission is granted to use a key in the desired way
- */
extern int key_task_permission(const key_ref_t key_ref,
const struct cred *cred,
key_perm_t perm);
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
{
return key_task_permission(key_ref, current_cred(), perm);
@@ -168,7 +170,7 @@ static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
#define KEY_ALL 0x3f /* all the above permissions */
/*
- * request_key authorisation
+ * Authorisation record for request_key().
*/
struct request_key_auth {
struct key *target_key;
@@ -188,7 +190,7 @@ extern struct key *request_key_auth_new(struct key *target,
extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
- * keyctl functions
+ * keyctl() functions
*/
extern long keyctl_get_keyring_ID(key_serial_t, int);
extern long keyctl_join_session_keyring(const char __user *);
@@ -214,7 +216,7 @@ extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
extern long keyctl_session_to_parent(void);
/*
- * debugging key validation
+ * Debugging key validation
*/
#ifdef KEY_DEBUGGING
extern void __key_check(const struct key *);
diff --git a/security/keys/key.c b/security/keys/key.c
index c1eac8084ade..84d4eb568b08 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -39,10 +39,10 @@ static DECLARE_RWSEM(key_types_sem);
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);
-/* we serialise key instantiation and link */
+/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
-/* any key who's type gets unegistered will be re-typed to this */
+/* Any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
.name = "dead",
};
@@ -56,10 +56,9 @@ void __key_check(const struct key *key)
}
#endif
-/*****************************************************************************/
/*
- * get the key quota record for a user, allocating a new record if one doesn't
- * already exist
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
*/
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
@@ -67,7 +66,7 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
struct rb_node *parent = NULL;
struct rb_node **p;
- try_again:
+try_again:
p = &key_user_tree.rb_node;
spin_lock(&key_user_lock);
@@ -124,18 +123,16 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
goto out;
/* okay - we found a user record for this UID */
- found:
+found:
atomic_inc(&user->usage);
spin_unlock(&key_user_lock);
kfree(candidate);
- out:
+out:
return user;
+}
-} /* end key_user_lookup() */
-
-/*****************************************************************************/
/*
- * dispose of a user structure
+ * Dispose of a user structure
*/
void key_user_put(struct key_user *user)
{
@@ -146,14 +143,11 @@ void key_user_put(struct key_user *user)
kfree(user);
}
+}
-} /* end key_user_put() */
-
-/*****************************************************************************/
/*
- * assign a key the next unique serial number
- * - these are assigned randomly to avoid security issues through covert
- * channel problems
+ * Allocate a serial number for a key. These are assigned randomly to avoid
+ * security issues through covert channel problems.
*/
static inline void key_alloc_serial(struct key *key)
{
@@ -211,18 +205,36 @@ serial_exists:
if (key->serial < xkey->serial)
goto attempt_insertion;
}
+}
-} /* end key_alloc_serial() */
-
-/*****************************************************************************/
-/*
- * allocate a key of the specified type
- * - update the user's quota to reflect the existence of the key
- * - called from a key-type operation with key_types_sem read-locked by
- * key_create_or_update()
- * - this prevents unregistration of the key type
- * - upon return the key is as yet uninstantiated; the caller needs to either
- * instantiate the key or discard it before returning
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ *
+ * Allocate a key of the specified type with the attributes given. The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved. The
+ * instantiation function should amend this as necessary. If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem. Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
uid_t uid, gid_t gid, const struct cred *cred,
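The key_alloc() kernel-doc above spells out the parameters and that the returned key is still uninstantiated. A hedged in-kernel sketch of the allocate-then-instantiate sequence it describes; the description string, payload, permissions and flags values are illustrative only, not recommendations:

#include <linux/key.h>
#include <linux/err.h>

static int example_make_key(struct key_type *type, const struct cred *cred)
{
	struct key *key;
	int ret;

	/* Allocate an uninstantiated key owned by root with minimal perms. */
	key = key_alloc(type, "example:demo", 0, 0, cred,
			KEY_POS_ALL | KEY_USR_VIEW, 0);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* Instantiate with a payload; no keyring link, no authorisation token. */
	ret = key_instantiate_and_link(key, "payload", 7, NULL, NULL);
	key_put(key);
	return ret;
}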
@@ -344,14 +356,19 @@ no_quota:
key_user_put(user);
key = ERR_PTR(-EDQUOT);
goto error;
-
-} /* end key_alloc() */
-
+}
EXPORT_SYMBOL(key_alloc);
-/*****************************************************************************/
-/*
- * reserve an amount of quota for the key's payload
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
@@ -384,15 +401,14 @@ int key_payload_reserve(struct key *key, size_t datalen)
key->datalen = datalen;
return ret;
-
-} /* end key_payload_reserve() */
-
+}
EXPORT_SYMBOL(key_payload_reserve);
-/*****************************************************************************/
/*
- * instantiate a key and link it into the target keyring atomically
- * - called with the target keyring's semaphore writelocked
+ * Instantiate a key and link it into the target keyring atomically. Must be
+ * called with the target keyring's semaphore writelocked. The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
const void *data,
@@ -441,12 +457,23 @@ static int __key_instantiate_and_link(struct key *key,
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret;
+}
-} /* end __key_instantiate_and_link() */
-
-/*****************************************************************************/
-/*
- * instantiate a key and link it into the target keyring atomically
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the keyring.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
*/
int key_instantiate_and_link(struct key *key,
const void *data,
@@ -471,14 +498,28 @@ int key_instantiate_and_link(struct key *key,
__key_link_end(keyring, key->type, prealloc);
return ret;
-
-} /* end key_instantiate_and_link() */
+}
EXPORT_SYMBOL(key_instantiate_and_link);
-/*****************************************************************************/
-/*
- * negatively instantiate a key and link it into the target keyring atomically
+/**
+ * key_negate_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and link it in to the destination keyring if one
+ * is supplied. The key and any links to the key will be automatically garbage
+ * collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
*/
int key_negate_and_link(struct key *key,
unsigned timeout,
@@ -535,22 +576,23 @@ int key_negate_and_link(struct key *key,
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
-
-} /* end key_negate_and_link() */
+}
EXPORT_SYMBOL(key_negate_and_link);
-/*****************************************************************************/
/*
- * do cleaning up in process context so that we don't have to disable
- * interrupts all over the place
+ * Garbage collect keys in process context so that we don't have to disable
+ * interrupts all over the place.
+ *
+ * key_put() schedules this rather than trying to do the cleanup itself, which
+ * means key_put() doesn't have to sleep.
*/
static void key_cleanup(struct work_struct *work)
{
struct rb_node *_n;
struct key *key;
- go_again:
+go_again:
/* look for a dead key in the tree */
spin_lock(&key_serial_lock);
@@ -564,7 +606,7 @@ static void key_cleanup(struct work_struct *work)
spin_unlock(&key_serial_lock);
return;
- found_dead_key:
+found_dead_key:
/* we found a dead key - once we've removed it from the tree, we can
* drop the lock */
rb_erase(&key->serial_node, &key_serial_tree);
@@ -601,14 +643,15 @@ static void key_cleanup(struct work_struct *work)
/* there may, of course, be more than one key to destroy */
goto go_again;
+}
-} /* end key_cleanup() */
-
-/*****************************************************************************/
-/*
- * dispose of a reference to a key
- * - when all the references are gone, we schedule the cleanup task to come and
- * pull it out of the tree in definite process context
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
*/
void key_put(struct key *key)
{
@@ -618,14 +661,11 @@ void key_put(struct key *key)
if (atomic_dec_and_test(&key->usage))
schedule_work(&key_cleanup_task);
}
-
-} /* end key_put() */
-
+}
EXPORT_SYMBOL(key_put);
-/*****************************************************************************/
/*
- * find a key by its serial number
+ * Find a key by its serial number.
*/
struct key *key_lookup(key_serial_t id)
{
@@ -647,11 +687,11 @@ struct key *key_lookup(key_serial_t id)
goto found;
}
- not_found:
+not_found:
key = ERR_PTR(-ENOKEY);
goto error;
- found:
+found:
/* pretend it doesn't exist if it is awaiting deletion */
if (atomic_read(&key->usage) == 0)
goto not_found;
@@ -661,16 +701,16 @@ struct key *key_lookup(key_serial_t id)
*/
atomic_inc(&key->usage);
- error:
+error:
spin_unlock(&key_serial_lock);
return key;
+}
-} /* end key_lookup() */
-
-/*****************************************************************************/
/*
- * find and lock the specified key type against removal
- * - we return with the sem readlocked
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful. If the type wasn't
+ * available -ENOKEY is returned instead.
*/
struct key_type *key_type_lookup(const char *type)
{
@@ -688,26 +728,23 @@ struct key_type *key_type_lookup(const char *type)
up_read(&key_types_sem);
ktype = ERR_PTR(-ENOKEY);
- found_kernel_type:
+found_kernel_type:
return ktype;
+}
-} /* end key_type_lookup() */
-
-/*****************************************************************************/
/*
- * unlock a key type
+ * Unlock a key type locked by key_type_lookup().
*/
void key_type_put(struct key_type *ktype)
{
up_read(&key_types_sem);
+}
-} /* end key_type_put() */
-
-/*****************************************************************************/
/*
- * attempt to update an existing key
- * - the key has an incremented refcount
- * - we need to put the key if we get an error
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
const void *payload, size_t plen)
@@ -742,13 +779,32 @@ error:
key_put(key);
key_ref = ERR_PTR(ret);
goto out;
+}
-} /* end __key_update() */
-
-/*****************************************************************************/
-/*
- * search the specified keyring for a key of the same description; if one is
- * found, update it, otherwise add a new one
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
*/
key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
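key_create_or_update() is the add-or-refresh primitive the kernel-doc above describes. A hedged sketch of calling it against a caller-supplied keyring reference; the "user" key type and payload are illustrative, KEY_PERM_UNDEF is the value named in the comment for letting the kernel pick a permissions mask, and the standard KEY_ALLOC_IN_QUOTA quota flag is assumed:

static int example_add_user_key(key_ref_t keyring_ref)
{
	key_ref_t kref;

	/* Update a matching "user" key if present, otherwise create one. */
	kref = key_create_or_update(keyring_ref, "user", "example:token",
				    "secret", 6, KEY_PERM_UNDEF,
				    KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(kref))
		return PTR_ERR(kref);

	key_ref_put(kref);	/* drop the reference handed back to us */
	return 0;
}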
@@ -855,14 +911,21 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = __key_update(key_ref, payload, plen);
goto error;
-
-} /* end key_create_or_update() */
-
+}
EXPORT_SYMBOL(key_create_or_update);
-/*****************************************************************************/
-/*
- * update a key
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data. The
+ * caller must be granted Write permission on the key. Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating. The key type may return other errors.
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
@@ -891,14 +954,17 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
error:
return ret;
-
-} /* end key_update() */
-
+}
EXPORT_SYMBOL(key_update);
-/*****************************************************************************/
-/*
- * revoke a key
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources. The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
*/
void key_revoke(struct key *key)
{
@@ -926,14 +992,16 @@ void key_revoke(struct key *key)
}
up_write(&key->sem);
-
-} /* end key_revoke() */
-
+}
EXPORT_SYMBOL(key_revoke);
-/*****************************************************************************/
-/*
- * register a type of key
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
*/
int register_key_type(struct key_type *ktype)
{
@@ -953,17 +1021,19 @@ int register_key_type(struct key_type *ktype)
list_add(&ktype->link, &key_types_list);
ret = 0;
- out:
+out:
up_write(&key_types_sem);
return ret;
-
-} /* end register_key_type() */
-
+}
EXPORT_SYMBOL(register_key_type);
-/*****************************************************************************/
-/*
- * unregister a type of key
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
*/
void unregister_key_type(struct key_type *ktype)
{
@@ -1010,14 +1080,11 @@ void unregister_key_type(struct key_type *ktype)
up_write(&key_types_sem);
key_schedule_gc(0);
-
-} /* end unregister_key_type() */
-
+}
EXPORT_SYMBOL(unregister_key_type);
-/*****************************************************************************/
/*
- * initialise the key management stuff
+ * Initialise the key management state.
*/
void __init key_init(void)
{
@@ -1037,5 +1104,4 @@ void __init key_init(void)
rb_insert_color(&root_key_user.node,
&key_user_tree);
-
-} /* end key_init() */
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 60924f6a52db..31a0fd8189f1 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1,4 +1,4 @@
-/* keyctl.c: userspace keyctl operations
+/* Userspace key control operations
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -31,28 +31,24 @@ static int key_get_type_from_user(char *type,
int ret;
ret = strncpy_from_user(type, _type, len);
-
if (ret < 0)
return ret;
-
if (ret == 0 || ret >= len)
return -EINVAL;
-
if (type[0] == '.')
return -EPERM;
-
type[len - 1] = '\0';
-
return 0;
}
-/*****************************************************************************/
/*
- * extract the description of a new key from userspace and either add it as a
- * new key to the specified keyring or update a matching key in that keyring
- * - the keyring must be writable
- * - returns the new key's serial number
- * - implements add_key()
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
*/
SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
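The rewritten comment above documents add_key() from the kernel side; from userspace the whole operation is one system call. A hedged sketch using raw syscall(2) so no keyutils library is assumed; the "user" key type and session-keyring destination are illustrative:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>	/* KEY_SPEC_SESSION_KEYRING */

int main(void)
{
	const char *payload = "hunter2";
	long id;

	/* add_key(type, description, payload, plen, dest_keyring) */
	id = syscall(__NR_add_key, "user", "example:password",
		     payload, strlen(payload), KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}
	printf("new key serial: %ld\n", id);
	return 0;
}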
@@ -132,19 +128,20 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
kfree(description);
error:
return ret;
+}
-} /* end sys_add_key() */
-
-/*****************************************************************************/
/*
- * search the process keyrings for a matching key
- * - nested keyrings may also be searched if they have Search permission
- * - if a key is found, it will be attached to the destination keyring if
- * there's one specified
- * - /sbin/request-key will be invoked if _callout_info is non-NULL
- * - the _callout_info string will be passed to /sbin/request-key
- * - if the _callout_info string is empty, it will be rendered as "-"
- * - implements request_key()
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key. Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key. The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request. If the
+ * _callout_info string is "" then it will be changed to "-".
*/
SYSCALL_DEFINE4(request_key, const char __user *, _type,
const char __user *, _description,
@@ -222,14 +219,14 @@ error2:
kfree(description);
error:
return ret;
+}
-} /* end sys_request_key() */
-
-/*****************************************************************************/
/*
- * get the ID of the specified process keyring
- * - the keyring must have search permission to be found
- * - implements keyctl(KEYCTL_GET_KEYRING_ID)
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
*/
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
@@ -248,13 +245,17 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
key_ref_put(key_ref);
error:
return ret;
+}
-} /* end keyctl_get_keyring_ID() */
-
-/*****************************************************************************/
/*
- * join the session keyring
- * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING)
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary. A named session keyring must have Search
+ * permission for it to be joined. Session keyrings without this permit will
+ * be skipped over.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
*/
long keyctl_join_session_keyring(const char __user *_name)
{
@@ -277,14 +278,17 @@ long keyctl_join_session_keyring(const char __user *_name)
error:
return ret;
+}
-} /* end keyctl_join_session_keyring() */
-
-/*****************************************************************************/
/*
- * update a key's data payload
- * - the key must be writable
- * - implements keyctl(KEYCTL_UPDATE)
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work. A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned. If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
*/
long keyctl_update_key(key_serial_t id,
const void __user *_payload,
@@ -326,14 +330,17 @@ error2:
kfree(payload);
error:
return ret;
+}
-} /* end keyctl_update_key() */
-
-/*****************************************************************************/
/*
- * revoke a key
- * - the key must be writable
- * - implements keyctl(KEYCTL_REVOKE)
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work. The key type should give up its quota claim when revoked. The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * If successful, 0 is returned.
*/
long keyctl_revoke_key(key_serial_t id)
{
@@ -358,14 +365,14 @@ long keyctl_revoke_key(key_serial_t id)
key_ref_put(key_ref);
error:
return ret;
+}
-} /* end keyctl_revoke_key() */
-
-/*****************************************************************************/
/*
- * clear the specified process keyring
- * - the keyring must be writable
- * - implements keyctl(KEYCTL_CLEAR)
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission for this to work. If
+ * successful, 0 will be returned.
*/
long keyctl_keyring_clear(key_serial_t ringid)
{
@@ -383,15 +390,18 @@ long keyctl_keyring_clear(key_serial_t ringid)
key_ref_put(keyring_ref);
error:
return ret;
+}
-} /* end keyctl_keyring_clear() */
-
-/*****************************************************************************/
/*
- * link a key into a keyring
- * - the keyring must be writable
- * - the key must be linkable
- * - implements keyctl(KEYCTL_LINK)
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission. Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
@@ -417,15 +427,16 @@ error2:
key_ref_put(keyring_ref);
error:
return ret;
+}
-} /* end keyctl_keyring_link() */
-
-/*****************************************************************************/
/*
- * unlink the first attachment of a key from a keyring
- * - the keyring must be writable
- * - we don't need any permissions on the key
- * - implements keyctl(KEYCTL_UNLINK)
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything. If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
@@ -451,19 +462,20 @@ error2:
key_ref_put(keyring_ref);
error:
return ret;
+}
-} /* end keyctl_keyring_unlink() */
-
-/*****************************************************************************/
/*
- * describe a user key
- * - the key must have view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of description available,
- * irrespective of how much we may have copied
- * - the description is formatted thus:
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
* type;uid;gid;perm;description<NUL>
- * - implements keyctl(KEYCTL_DESCRIBE)
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
*/
long keyctl_describe_key(key_serial_t keyid,
char __user *buffer,
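keyctl_describe_key() formats its reply as type;uid;gid;perm;description<NUL> and returns the full length of that string even when the buffer was too small. A hedged userspace sketch of reading it back via the raw keyctl(2) syscall (buffer size and error handling kept minimal):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>	/* KEYCTL_DESCRIBE */

/* Print "type;uid;gid;perm;description" for a key; 0 on success. */
static int describe_key(long id)
{
	char buf[256];
	long len;

	len = syscall(__NR_keyctl, KEYCTL_DESCRIBE, id, buf, sizeof(buf));
	if (len < 0)
		return -1;
	if ((unsigned long)len > sizeof(buf))
		return -1;	/* description longer than our buffer: retry with len bytes */
	printf("%s\n", buf);
	return 0;
}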
@@ -531,18 +543,17 @@ error2:
key_ref_put(key_ref);
error:
return ret;
+}
-} /* end keyctl_describe_key() */
-
-/*****************************************************************************/
/*
- * search the specified keyring for a matching key
- * - the start keyring must be searchable
- * - nested keyrings may also be searched if they are searchable
- * - only keys with search permission may be found
- * - if a key is found, it will be attached to the destination keyring if
- * there's one specified
- * - implements keyctl(KEYCTL_SEARCH)
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key. Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring). Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
*/
long keyctl_keyring_search(key_serial_t ringid,
const char __user *_type,
@@ -626,18 +637,17 @@ error2:
kfree(description);
error:
return ret;
+}
-} /* end keyctl_keyring_search() */
-
-/*****************************************************************************/
/*
- * read a user key's payload
- * - the keyring must be readable or the key must be searchable from the
- * process's keyrings
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of data in the key,
- * irrespective of how much we may have copied
- * - implements keyctl(KEYCTL_READ)
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
*/
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
@@ -688,15 +698,22 @@ error2:
key_put(key);
error:
return ret;
+}
-} /* end keyctl_read_key() */
-
-/*****************************************************************************/
/*
- * change the ownership of a key
- * - the keyring owned by the changer
- * - if the uid or gid is -1, then that parameter is not changed
- * - implements keyctl(KEYCTL_CHOWN)
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability. If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key. The quota deduction will be moved from the old user to
+ * the new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
{
@@ -796,14 +813,14 @@ quota_overrun:
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
+}
-} /* end keyctl_chown_key() */
-
-/*****************************************************************************/
/*
- * change the permission mask on a key
- * - the keyring owned by the changer
- * - implements keyctl(KEYCTL_SETPERM)
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
*/
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
@@ -838,11 +855,11 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
key_put(key);
error:
return ret;
-
-} /* end keyctl_setperm_key() */
+}
/*
- * get the destination keyring for instantiation
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
*/
static long get_instantiation_keyring(key_serial_t ringid,
struct request_key_auth *rka,
@@ -879,7 +896,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
}
/*
- * change the request_key authorisation key on the current process
+ * Change the request_key authorisation key on the current process.
*/
static int keyctl_change_reqkey_auth(struct key *key)
{
@@ -895,10 +912,14 @@ static int keyctl_change_reqkey_auth(struct key *key)
return commit_creds(new);
}
-/*****************************************************************************/
/*
- * instantiate the key with the specified payload, and, if one is given, link
- * the key into the keyring
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_instantiate_key(key_serial_t id,
const void __user *_payload,
@@ -973,13 +994,22 @@ error2:
vfree(payload);
error:
return ret;
+}
-} /* end keyctl_instantiate_key() */
-
-/*****************************************************************************/
/*
- * negatively instantiate the key with the given timeout (in seconds), and, if
- * one is given, link the key into the keyring
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
@@ -1020,13 +1050,14 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
error:
return ret;
+}
-} /* end keyctl_negate_key() */
-
-/*****************************************************************************/
/*
- * set the default keyring in which request_key() will cache keys
- * - return the old setting
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+ * If a process keyring is specified then this will be created if it doesn't
+ * yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1079,12 +1110,19 @@ set:
error:
abort_creds(new);
return ret;
+}
-} /* end keyctl_set_reqkey_keyring() */
-
-/*****************************************************************************/
/*
- * set or clear the timeout for a key
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time. The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * If successful, 0 is returned.
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
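A small userspace sketch of the timeout behaviour described above (not part of this patch); the key name is illustrative:

#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Add an illustrative key and give it a 60-second lifetime; when the
	 * timeout expires the key and its links are garbage collected. */
	long id = syscall(SYS_add_key, "user", "example:ticket", "x", 1,
			  KEY_SPEC_SESSION_KEYRING);
	if (id < 0 || syscall(SYS_keyctl, KEYCTL_SET_TIMEOUT, id, 60) < 0) {
		perror("keyctl");
		return 1;
	}
	return 0;
}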
@@ -1136,12 +1174,24 @@ okay:
ret = 0;
error:
return ret;
+}
-} /* end keyctl_set_timeout() */
-
-/*****************************************************************************/
/*
- * assume the authority to instantiate the specified key
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated. It has the effect of making
+ * the keys of the process that called request_key() to create the key
+ * available to request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation key in their process keyrings with a
+ * Search permission grant available to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned. The authorisation key can be read to get
+ * the callout information passed to request_key().
*/
long keyctl_assume_authority(key_serial_t id)
{
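A hedged sketch of how a request-key(8) helper might use this, following the comments above and on keyctl_instantiate_key()/keyctl_negate_key(): assume authority over the key named on the command line, read the callout information from the authorisation key, then instantiate or negate the target. The payload handling here is purely illustrative:

#include <linux/keyctl.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	/* /sbin/request-key invokes helpers as: <op> <key> <uid> <gid> ... */
	long key = argc > 2 ? atol(argv[2]) : 0;
	char callout[4096];
	long len;

	/* Take on the authority to instantiate the target key. */
	if (syscall(SYS_keyctl, KEYCTL_ASSUME_AUTHORITY, key) < 0)
		return 1;

	/* The callout information is the payload of the authorisation key. */
	len = syscall(SYS_keyctl, KEYCTL_READ, KEY_SPEC_REQKEY_AUTH_KEY,
		      callout, sizeof(callout));
	if (len < 0)
		goto negate;
	if (len > (long)sizeof(callout))
		len = sizeof(callout);	/* truncated; fine for a sketch */

	/* For illustration the callout info is used as the payload directly;
	 * a real helper would derive the payload from it.  Ring ID 0 means
	 * the instantiated key is not linked anywhere extra. */
	if (syscall(SYS_keyctl, KEYCTL_INSTANTIATE, key, callout, len, 0) < 0)
		goto negate;
	return 0;

negate:
	/* Negate for 30 seconds to rate-limit repeated upcalls. */
	syscall(SYS_keyctl, KEYCTL_NEGATE, key, 30, 0);
	return 1;
}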
@@ -1178,16 +1228,17 @@ long keyctl_assume_authority(key_serial_t id)
ret = authkey->serial;
error:
return ret;
-
-} /* end keyctl_assume_authority() */
+}
/*
- * get the security label of a key
- * - the key must grant us view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of information available,
- * irrespective of how much we may have copied (including the terminal NUL)
- * - implements keyctl(KEYCTL_GET_SECURITY)
+ * Get a key's LSM security label.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
*/
long keyctl_get_security(key_serial_t keyid,
char __user *buffer,
@@ -1242,10 +1293,16 @@ long keyctl_get_security(key_serial_t keyid,
}
/*
- * attempt to install the calling process's session keyring on the process's
- * parent process
- * - the keyring must exist and must grant us LINK permission
- * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_session_to_parent(void)
{
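A minimal userspace sketch of the behaviour documented above, in the style of `keyctl new_session` (not part of this patch); the keyring name is illustrative:

#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Join a fresh named session keyring, then ask the kernel to install
	 * it on the parent (typically the invoking shell) when the parent
	 * next resumes userspace. */
	if (syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, "example") < 0 ||
	    syscall(SYS_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
		perror("keyctl");
		return 1;
	}
	return 0;
}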
@@ -1348,9 +1405,8 @@ error_keyring:
#endif /* !TIF_NOTIFY_RESUME */
}
-/*****************************************************************************/
/*
- * the key control system call
+ * The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
@@ -1439,5 +1495,4 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
default:
return -EOPNOTSUPP;
}
-
-} /* end sys_keyctl() */
+}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d37f713e73ce..92024ed12e0a 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -26,13 +26,13 @@
rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
/*
- * when plumbing the depths of the key tree, this sets a hard limit set on how
- * deep we're willing to go
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
*/
#define KEYRING_SEARCH_MAX_DEPTH 6
/*
- * we keep all named keyrings in a hash to speed looking them up
+ * We keep all named keyrings in a hash to speed looking them up.
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
@@ -50,7 +50,9 @@ static inline unsigned keyring_hash(const char *desc)
}
/*
- * the keyring type definition
+ * The keyring key type definition. Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
*/
static int keyring_instantiate(struct key *keyring,
const void *data, size_t datalen);
@@ -71,19 +73,17 @@ struct key_type key_type_keyring = {
.describe = keyring_describe,
.read = keyring_read,
};
-
EXPORT_SYMBOL(key_type_keyring);
/*
- * semaphore to serialise link/link calls to prevent two link calls in parallel
- * introducing a cycle
+ * Semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
*/
static DECLARE_RWSEM(keyring_serialise_link_sem);
-/*****************************************************************************/
/*
- * publish the name of a keyring so that it can be found by name (if it has
- * one)
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one).
*/
static void keyring_publish_name(struct key *keyring)
{
@@ -102,13 +102,12 @@ static void keyring_publish_name(struct key *keyring)
write_unlock(&keyring_name_lock);
}
+}
-} /* end keyring_publish_name() */
-
-/*****************************************************************************/
/*
- * initialise a keyring
- * - we object if we were given any data
+ * Initialise a keyring.
+ *
+ * Returns 0 on success, -EINVAL if given any data.
*/
static int keyring_instantiate(struct key *keyring,
const void *data, size_t datalen)
@@ -123,23 +122,20 @@ static int keyring_instantiate(struct key *keyring,
}
return ret;
+}
-} /* end keyring_instantiate() */
-
-/*****************************************************************************/
/*
- * match keyrings on their name
+ * Match keyrings on their name
*/
static int keyring_match(const struct key *keyring, const void *description)
{
return keyring->description &&
strcmp(keyring->description, description) == 0;
+}
-} /* end keyring_match() */
-
-/*****************************************************************************/
/*
- * dispose of the data dangling from the corpse of a keyring
+ * Clean up a keyring when it is destroyed. Unpublish its name if it had one
+ * and dispose of its data.
*/
static void keyring_destroy(struct key *keyring)
{
@@ -164,12 +160,10 @@ static void keyring_destroy(struct key *keyring)
key_put(klist->keys[loop]);
kfree(klist);
}
+}
-} /* end keyring_destroy() */
-
-/*****************************************************************************/
/*
- * describe the keyring
+ * Describe a keyring for /proc.
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
@@ -187,13 +181,12 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
else
seq_puts(m, ": empty");
rcu_read_unlock();
+}
-} /* end keyring_describe() */
-
-/*****************************************************************************/
/*
- * read a list of key IDs from the keyring's contents
- * - the keyring's semaphore is read-locked
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
@@ -241,12 +234,10 @@ static long keyring_read(const struct key *keyring,
error:
return ret;
+}
-} /* end keyring_read() */
-
-/*****************************************************************************/
/*
- * allocate a keyring and link into the destination keyring
+ * Allocate a keyring and link into the destination keyring.
*/
struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
const struct cred *cred, unsigned long flags,
@@ -269,20 +260,42 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
}
return keyring;
+}
-} /* end keyring_alloc() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we use the supplied match function to see if the description (or other
- * feature of interest) matches
- * - we rely on RCU to prevent the keyring lists from disappearing on us
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
- * - we propagate the possession attribute from the keyring ref to the key ref
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @cred: The credentials to use for permissions checks.
+ * @type: The type of key to search for.
+ * @description: Parameter for @match.
+ * @match: Function to rule on whether or not a key is the one required.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes in order to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU is used to prevent the keyring key lists from disappearing without the
+ * need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
*/
key_ref_t keyring_search_aux(key_ref_t keyring_ref,
const struct cred *cred,
@@ -444,17 +457,16 @@ error_2:
rcu_read_unlock();
error:
return key_ref;
+}
-} /* end keyring_search_aux() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we readlock the keyrings as we search down the tree
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of key we want to find.
+ * @description: The name of the key we want to find.
+ *
+ * As keyring_search_aux() above, but using the current task's credentials and
+ * type's default matching function.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
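A hedged in-kernel sketch (not part of this patch) of how a caller might use the exported keyring_search() wrapper; example_find_user_key() is an illustrative name:

#include <keys/user-type.h>
#include <linux/err.h>
#include <linux/key.h>

/* Look up a user-defined key by name in a keyring we already hold.  The "1"
 * passed to make_key_ref() marks the keyring as possessed, so possessor
 * permission bits apply as described for keyring_search_aux() above. */
static struct key *example_find_user_key(struct key *keyring, const char *name)
{
	key_ref_t kref;

	kref = keyring_search(make_key_ref(keyring, 1), &key_type_user, name);
	if (IS_ERR(kref))
		return ERR_CAST(kref);		/* -EAGAIN, -ENOKEY, ... */

	return key_ref_to_ptr(kref);		/* usage count already held */
}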
@@ -465,16 +477,23 @@ key_ref_t keyring_search(key_ref_t keyring,
return keyring_search_aux(keyring, current->cred,
type, description, type->match);
-
-} /* end keyring_search() */
-
+}
EXPORT_SYMBOL(keyring_search);
-/*****************************************************************************/
/*
- * search the given keyring only (no recursion)
- * - keyring must be locked by caller
- * - caller must guarantee that the keyring is a keyring
+ * Search the given keyring only (no recursion).
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to search the keyring as no check is made here.
+ *
+ * RCU is used to make it unnecessary to lock the keyring key list here.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns -ENOKEY if not found. Revoked keys and keys not
+ * providing the requested permission are skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
*/
key_ref_t __keyring_search_one(key_ref_t keyring_ref,
const struct key_type *ktype,
@@ -514,14 +533,18 @@ found:
atomic_inc(&key->usage);
rcu_read_unlock();
return make_key_ref(key, possessed);
+}
-} /* end __keyring_search_one() */
-
-/*****************************************************************************/
/*
- * find a keyring with the specified name
- * - all named keyrings are searched
- * - normally only finds keyrings with search permission for the current process
+ * Find a keyring with the specified name.
+ *
+ * All named keyrings in the current user namespace are searched, provided they
+ * grant Search permission directly to the caller (unless this check is
+ * skipped). Keyrings whose usage counts have reached zero or that have been
+ * revoked are skipped.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success. -ENOKEY is returned if a keyring could not be found.
*/
struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
{
@@ -569,15 +592,14 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
out:
read_unlock(&keyring_name_lock);
return keyring;
+}
-} /* end find_keyring_by_name() */
-
-/*****************************************************************************/
/*
- * see if a cycle will will be created by inserting acyclic tree B in acyclic
- * tree A at the topmost level (ie: as a direct child of A)
- * - since we are adding B to A at the top level, checking for cycles should
- * just be a matter of seeing if node A is somewhere in tree B
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
@@ -657,11 +679,10 @@ too_deep:
cycle_detected:
ret = -EDEADLK;
goto error;
-
-} /* end keyring_detect_cycle() */
+}
/*
- * dispose of a keyring list after the RCU grace period, freeing the unlinked
+ * Dispose of a keyring list after the RCU grace period, freeing the unlinked
* key
*/
static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
@@ -675,7 +696,7 @@ static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
}
/*
- * preallocate memory so that a key can be linked into to a keyring
+ * Preallocate memory so that a key can be linked into a keyring.
*/
int __key_link_begin(struct key *keyring, const struct key_type *type,
const char *description,
@@ -792,10 +813,10 @@ error_krsem:
}
/*
- * check already instantiated keys aren't going to be a problem
- * - the caller must have called __key_link_begin()
- * - don't need to call this for keys that were created since __key_link_begin()
- * was called
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin(). Don't need to call this for
+ * keys that were created since __key_link_begin() was called.
*/
int __key_link_check_live_key(struct key *keyring, struct key *key)
{
@@ -807,9 +828,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
}
/*
- * link a key into to a keyring
- * - must be called with __key_link_begin() having being called
- * - discard already extant link to matching key if there is one
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called. Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
*/
void __key_link(struct key *keyring, struct key *key,
struct keyring_list **_prealloc)
@@ -852,8 +876,9 @@ void __key_link(struct key *keyring, struct key *key,
}
/*
- * finish linking a key into to a keyring
- * - must be called with __key_link_begin() having being called
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
*/
void __key_link_end(struct key *keyring, struct key_type *type,
struct keyring_list *prealloc)
@@ -874,8 +899,25 @@ void __key_link_end(struct key *keyring, struct key_type *type,
up_write(&keyring->sem);
}
-/*
- * link a key to a keyring
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
*/
int key_link(struct key *keyring, struct key *key)
{
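A hedged in-kernel sketch of the allocate-then-link pattern the comment above describes; example_make_cache_keyring() and the keyring description are illustrative names:

#include <linux/cred.h>
#include <linux/err.h>
#include <linux/key.h>

/* Allocate a keyring owned by the current user and link an existing key into
 * it, consuming key quota as the comment above describes. */
static struct key *example_make_cache_keyring(struct key *first_key)
{
	struct key *ring;
	int ret;

	ring = keyring_alloc("example_cache", current_fsuid(), current_fsgid(),
			     current_cred(), KEY_ALLOC_IN_QUOTA, NULL);
	if (IS_ERR(ring))
		return ring;

	/* 0 on success, else -ENFILE, -EDQUOT, -ENOMEM, ... as listed above */
	ret = key_link(ring, first_key);
	if (ret < 0) {
		key_put(ring);
		return ERR_PTR(ret);
	}
	return ring;		/* the caller now owns a keyring reference */
}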
@@ -895,12 +937,24 @@ int key_link(struct key *keyring, struct key *key)
return ret;
}
-
EXPORT_SYMBOL(key_link);
-/*****************************************************************************/
-/*
- * unlink the first link to a key from a keyring
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
*/
int key_unlink(struct key *keyring, struct key *key)
{
@@ -968,15 +1022,12 @@ nomem:
ret = -ENOMEM;
up_write(&keyring->sem);
goto error;
-
-} /* end key_unlink() */
-
+}
EXPORT_SYMBOL(key_unlink);
-/*****************************************************************************/
/*
- * dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to
+ * Dispose of a keyring list after the RCU grace period, releasing the keys it
+ * links to.
*/
static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
{
@@ -989,13 +1040,15 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
key_put(klist->keys[loop]);
kfree(klist);
+}
-} /* end keyring_clear_rcu_disposal() */
-
-/*****************************************************************************/
-/*
- * clear the specified process keyring
- * - implements keyctl(KEYCTL_CLEAR)
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
*/
int keyring_clear(struct key *keyring)
{
@@ -1027,15 +1080,13 @@ int keyring_clear(struct key *keyring)
}
return ret;
-
-} /* end keyring_clear() */
-
+}
EXPORT_SYMBOL(keyring_clear);
-/*****************************************************************************/
/*
- * dispose of the links from a revoked keyring
- * - called with the key sem write-locked
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
*/
static void keyring_revoke(struct key *keyring)
{
@@ -1050,11 +1101,10 @@ static void keyring_revoke(struct key *keyring)
rcu_assign_pointer(keyring->payload.subscriptions, NULL);
call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
}
-
-} /* end keyring_revoke() */
+}
/*
- * Determine whether a key is dead
+ * Determine whether a key is dead.
*/
static bool key_is_dead(struct key *key, time_t limit)
{
@@ -1063,7 +1113,12 @@ static bool key_is_dead(struct key *key, time_t limit)
}
/*
- * Collect garbage from the contents of a keyring
+ * Collect garbage from the contents of a keyring, replacing the old list with
+ * a new one with the pointers all shuffled down.
+ *
+ * Dead keys are classed as ones that are flagged as being dead or are revoked,
+ * expired or negative keys that were revoked or expired before the specified
+ * limit.
*/
void keyring_gc(struct key *keyring, time_t limit)
{
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 28645502cd0d..c35b5229e3cd 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -1,4 +1,4 @@
-/* permission.c: key permission determination
+/* Key permission checking
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -13,18 +13,19 @@
#include <linux/security.h>
#include "internal.h"
-/*****************************************************************************/
/**
* key_task_permission - Check a key can be used
- * @key_ref: The key to check
- * @cred: The credentials to use
- * @perm: The permissions to check for
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @perm: The permissions to check for.
*
* Check to see whether permission is granted to use a key in the desired way,
* but permit the security modules to override.
*
- * The caller must hold either a ref on cred or must hold the RCU readlock or a
- * spinlock.
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
*/
int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
key_perm_t perm)
@@ -79,14 +80,16 @@ use_these_perms:
/* let LSM be the final arbiter */
return security_key_permission(key_ref, cred, perm);
-
-} /* end key_task_permission() */
-
+}
EXPORT_SYMBOL(key_task_permission);
-/*****************************************************************************/
-/*
- * validate a key
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if
+ * the key's type has been removed or if the key has been revoked or
+ * -EKEYEXPIRED if the key has expired.
*/
int key_validate(struct key *key)
{
@@ -111,7 +114,5 @@ int key_validate(struct key *key)
error:
return ret;
-
-} /* end key_validate() */
-
+}
EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 70373966816e..525cf8a29cdd 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -1,4 +1,4 @@
-/* proc.c: proc files for key database enumeration
+/* procfs files for key database enumeration
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -60,9 +60,8 @@ static const struct file_operations proc_key_users_fops = {
.release = seq_release,
};
-/*****************************************************************************/
/*
- * declare the /proc files
+ * Declare the /proc files.
*/
static int __init key_proc_init(void)
{
@@ -79,14 +78,13 @@ static int __init key_proc_init(void)
panic("Cannot create /proc/key-users\n");
return 0;
-
-} /* end key_proc_init() */
+}
__initcall(key_proc_init);
-/*****************************************************************************/
/*
- * implement "/proc/keys" to provides a list of the keys on the system
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
@@ -293,9 +291,9 @@ static struct rb_node *key_user_first(struct rb_root *r)
return __key_user_next(n);
}
-/*****************************************************************************/
/*
- * implement "/proc/key-users" to provides a list of the key users
+ * Implement "/proc/key-users" to provide a list of the key users and their
+ * quotas.
*/
static int proc_key_users_open(struct inode *inode, struct file *file)
{
@@ -351,5 +349,4 @@ static int proc_key_users_show(struct seq_file *m, void *v)
maxbytes);
return 0;
-
}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 504bdd2452bd..930634e45149 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -1,4 +1,4 @@
-/* Management of a process's keyrings
+/* Manage a process's keyrings
*
* Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -21,13 +21,13 @@
#include <asm/uaccess.h>
#include "internal.h"
-/* session keyring create vs join semaphore */
+/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
-/* user keyring creation semaphore */
+/* User keyring creation semaphore */
static DEFINE_MUTEX(key_user_keyring_mutex);
-/* the root user's tracking struct */
+/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = ATOMIC_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
@@ -38,9 +38,8 @@ struct key_user root_key_user = {
.user_ns = &init_user_ns,
};
-/*****************************************************************************/
/*
- * install user and user session keyrings for a particular UID
+ * Install the user and user session keyrings for the current process's UID.
*/
int install_user_keyrings(void)
{
@@ -122,7 +121,8 @@ error:
}
/*
- * install a fresh thread keyring directly to new credentials
+ * Install a fresh thread keyring directly to new credentials. This keyring is
+ * allowed to overrun the quota.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
@@ -138,7 +138,7 @@ int install_thread_keyring_to_cred(struct cred *new)
}
/*
- * install a fresh thread keyring, discarding the old one
+ * Install a fresh thread keyring, discarding the old one.
*/
static int install_thread_keyring(void)
{
@@ -161,9 +161,10 @@ static int install_thread_keyring(void)
}
/*
- * install a process keyring directly to a credentials struct
- * - returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other -ve on any other error
+ * Install a process keyring directly to a credentials struct.
+ *
+ * Returns -EEXIST if there was already a process keyring, 0 if one was
+ * installed, and a negative error code on any other error.
*/
int install_process_keyring_to_cred(struct cred *new)
{
@@ -192,8 +193,11 @@ int install_process_keyring_to_cred(struct cred *new)
}
/*
- * make sure a process keyring is installed
- * - we
+ * Make sure a process keyring is installed for the current process. The
+ * existing process keyring is not replaced.
+ *
+ * Returns 0 if there is a process keyring by the end of this function, some
+ * error otherwise.
*/
static int install_process_keyring(void)
{
@@ -214,7 +218,7 @@ static int install_process_keyring(void)
}
/*
- * install a session keyring directly to a credentials struct
+ * Install a session keyring directly to a credentials struct.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@@ -254,8 +258,8 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
}
/*
- * install a session keyring, discarding the old one
- * - if a keyring is not supplied, an empty one is invented
+ * Install a session keyring, discarding the old one. If a keyring is not
+ * supplied, an empty one is invented.
*/
static int install_session_keyring(struct key *keyring)
{
@@ -275,9 +279,8 @@ static int install_session_keyring(struct key *keyring)
return commit_creds(new);
}
-/*****************************************************************************/
/*
- * the filesystem user ID changed
+ * Handle the fsuid changing.
*/
void key_fsuid_changed(struct task_struct *tsk)
{
@@ -288,12 +291,10 @@ void key_fsuid_changed(struct task_struct *tsk)
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
+}
-} /* end key_fsuid_changed() */
-
-/*****************************************************************************/
/*
- * the filesystem group ID changed
+ * Handle the fsgid changing.
*/
void key_fsgid_changed(struct task_struct *tsk)
{
@@ -304,16 +305,28 @@ void key_fsgid_changed(struct task_struct *tsk)
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
+}
-} /* end key_fsgid_changed() */
-
-/*****************************************************************************/
/*
- * search only my process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- * feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key.
+ *
+ * The search criteria are the type and the match function. The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search. Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials. Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too. Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
*/
key_ref_t search_my_process_keyrings(struct key_type *type,
const void *description,
@@ -428,13 +441,13 @@ found:
return key_ref;
}
-/*****************************************************************************/
/*
- * search the process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- * feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_my_process_keyrings(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * Return same as search_my_process_keyrings().
*/
key_ref_t search_process_keyrings(struct key_type *type,
const void *description,
@@ -489,24 +502,33 @@ key_ref_t search_process_keyrings(struct key_type *type,
found:
return key_ref;
+}
-} /* end search_process_keyrings() */
-
-/*****************************************************************************/
/*
- * see if the key we're looking at is the target key
+ * See if the key we're looking at is the target key.
*/
int lookup_user_key_possessed(const struct key *key, const void *target)
{
return key == target;
+}
-} /* end lookup_user_key_possessed() */
-
-/*****************************************************************************/
/*
- * lookup a key given a key ID from userspace with a given permissions mask
- * - don't create special keyrings unless so requested
- * - partially constructed keys aren't found unless requested
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permit or the LSM denied access to it;
+ * or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
*/
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
@@ -711,15 +733,18 @@ invalid_key:
reget_creds:
put_cred(cred);
goto try_again;
+}
-} /* end lookup_user_key() */
-
-/*****************************************************************************/
/*
- * join the named keyring as the session keyring if possible, or attempt to
- * create a new one of that name if not
- * - if the name is NULL, an empty anonymous keyring is installed instead
- * - named session keyring joining is done with a semaphore held
+ * Join the named keyring as the session keyring if possible else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
*/
long join_session_keyring(const char *name)
{
@@ -791,8 +816,8 @@ error:
}
/*
- * Replace a process's session keyring when that process resumes userspace on
- * behalf of one of its children
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
*/
void key_replace_session_keyring(void)
{
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 0ea52d25a6bd..9a7fb3914b27 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -39,8 +39,14 @@ static int key_wait_bit_intr(void *flags)
return signal_pending(current) ? -ERESTARTSYS : 0;
}
-/*
- * call to complete the construction of a key
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @cons: The key construction record.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key. The key will be negated
+ * if an error is indicated. The authorisation key will be revoked
+ * unconditionally.
*/
void complete_request_key(struct key_construction *cons, int error)
{
@@ -58,23 +64,33 @@ void complete_request_key(struct key_construction *cons, int error)
}
EXPORT_SYMBOL(complete_request_key);
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in the context of a freshly forked kthread before
+ * kernel_execve(), so we can simply install the desired session keyring here.
+ */
static int umh_keys_init(struct subprocess_info *info)
{
struct cred *cred = (struct cred*)current_cred();
struct key *keyring = info->data;
- /*
- * This is called in context of freshly forked kthread before
- * kernel_execve(), we can just change our ->session_keyring.
- */
+
return install_session_keyring_to_cred(cred, keyring);
}
+/*
+ * Clean up a usermode helper with session keyring.
+ */
static void umh_keys_cleanup(struct subprocess_info *info)
{
struct key *keyring = info->data;
key_put(keyring);
}
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
static int call_usermodehelper_keys(char *path, char **argv, char **envp,
struct key *session_keyring, enum umh_wait wait)
{
@@ -91,7 +107,7 @@ static int call_usermodehelper_keys(char *path, char **argv, char **envp,
}
/*
- * request userspace finish the construction of a key
+ * Request userspace finish the construction of a key
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
static int call_sbin_request_key(struct key_construction *cons,
@@ -198,8 +214,9 @@ error_alloc:
}
/*
- * call out to userspace for key construction
- * - we ignore program failure and go on key status instead
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
*/
static int construct_key(struct key *key, const void *callout_info,
size_t callout_len, void *aux,
@@ -246,9 +263,10 @@ static int construct_key(struct key *key, const void *callout_info,
}
/*
- * get the appropriate destination keyring for the request
- * - we return whatever keyring we select with an extra reference upon it which
- * the caller must release
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
*/
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
@@ -321,9 +339,11 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
}
/*
- * allocate a new key in under-construction state and attempt to link it in to
- * the requested place
- * - may return a key that's already under construction instead
+ * Allocate a new key in under-construction state and attempt to link it in to
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
*/
static int construct_alloc_key(struct key_type *type,
const char *description,
@@ -414,7 +434,7 @@ alloc_failed:
}
/*
- * commence key construction
+ * Commence key construction.
*/
static struct key *construct_key_and_link(struct key_type *type,
const char *description,
@@ -465,12 +485,32 @@ construction_failed:
return ERR_PTR(ret);
}
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - cache the key in an appropriate keyring
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria is searched for in the process's
+ * keyrings and returned with its usage count incremented if found. Otherwise,
+ * if callout_info is not NULL, a key will be allocated and some service
+ * (probably in userspace) will be asked to instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
*/
struct key *request_key_and_link(struct key_type *type,
const char *description,
@@ -524,8 +564,16 @@ error:
return key;
}
-/*
- * wait for construction of a key to complete
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
*/
int wait_for_key_construction(struct key *key, bool intr)
{
@@ -542,12 +590,19 @@ int wait_for_key_construction(struct key *key, bool intr)
}
EXPORT_SYMBOL(wait_for_key_construction);
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota,
+ * the callout_info must be a NUL-terminated string and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key(struct key_type *type,
const char *description,
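A hedged in-kernel sketch of a request_key() consumer reading a user-defined key's payload under the key semaphore; example_get_secret() is an illustrative name:

#include <keys/user-type.h>
#include <linux/err.h>
#include <linux/key.h>

/* Request a user-defined key and hand its payload to a consumer while the
 * key semaphore is held to keep the payload stable. */
static int example_get_secret(const char *name,
			      void (*use)(const void *data, size_t len))
{
	const struct user_key_payload *upayload;
	struct key *key;

	key = request_key(&key_type_user, name, NULL);
	if (IS_ERR(key))
		return PTR_ERR(key);	/* -ENOKEY, -EKEYEXPIRED, ... */

	down_read(&key->sem);
	upayload = key->payload.data;
	use(upayload->data, upayload->datalen);
	up_read(&key->sem);

	key_put(key);
	return 0;
}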
@@ -572,12 +627,19 @@ struct key *request_key(struct key_type *type,
}
EXPORT_SYMBOL(request_key);
-/*
- * request a key with auxiliary data for the upcaller
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
@@ -602,10 +664,18 @@ struct key *request_key_with_auxdata(struct key_type *type,
EXPORT_SYMBOL(request_key_with_auxdata);
/*
- * request a key (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * request_key_async - Request a key (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota and
+ * no auxiliary data can be passed.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async(struct key_type *type,
const char *description,
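A hedged sketch combining request_key_async() with wait_for_key_construction() as documented above; the function name and callout string are illustrative:

#include <keys/user-type.h>
#include <linux/err.h>
#include <linux/key.h>

/* Start constructing a key without blocking on the upcall, do other work,
 * then wait for the result. */
static struct key *example_prefetch_key(const char *name)
{
	static const char callout[] = "example-callout-info";
	struct key *key;
	int ret;

	key = request_key_async(&key_type_user, name, callout,
				sizeof(callout) - 1);
	if (IS_ERR(key))
		return key;

	/* ... other work could be done here ... */

	ret = wait_for_key_construction(key, true);	/* interruptible */
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}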
@@ -620,9 +690,17 @@ EXPORT_SYMBOL(request_key_async);
/*
* request a key with auxiliary data for the upcaller (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async_with_auxdata(struct key_type *type,
const char *description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 86747151ee5b..68164031a74e 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -1,4 +1,4 @@
-/* request_key_auth.c: request key authorisation controlling key def
+/* Request key authorisation token key definition.
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -26,7 +26,7 @@ static void request_key_auth_destroy(struct key *);
static long request_key_auth_read(const struct key *, char __user *, size_t);
/*
- * the request-key authorisation key type definition
+ * The request-key authorisation key type definition.
*/
struct key_type key_type_request_key_auth = {
.name = ".request_key_auth",
@@ -38,9 +38,8 @@ struct key_type key_type_request_key_auth = {
.read = request_key_auth_read,
};
-/*****************************************************************************/
/*
- * instantiate a request-key authorisation key
+ * Instantiate a request-key authorisation key.
*/
static int request_key_auth_instantiate(struct key *key,
const void *data,
@@ -48,12 +47,10 @@ static int request_key_auth_instantiate(struct key *key,
{
key->payload.data = (struct request_key_auth *) data;
return 0;
+}
-} /* end request_key_auth_instantiate() */
-
-/*****************************************************************************/
/*
- * reading a request-key authorisation key retrieves the callout information
+ * Describe an authorisation token.
*/
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
@@ -63,12 +60,10 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
-} /* end request_key_auth_describe() */
-
-/*****************************************************************************/
/*
- * read the callout_info data
+ * Read the callout_info data (retrieves the callout information).
* - the key's semaphore is read-locked
*/
static long request_key_auth_read(const struct key *key,
@@ -91,13 +86,12 @@ static long request_key_auth_read(const struct key *key,
}
return ret;
+}
-} /* end request_key_auth_read() */
-
-/*****************************************************************************/
/*
- * handle revocation of an authorisation token key
- * - called with the key sem write-locked
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
*/
static void request_key_auth_revoke(struct key *key)
{
@@ -109,12 +103,10 @@ static void request_key_auth_revoke(struct key *key)
put_cred(rka->cred);
rka->cred = NULL;
}
+}
-} /* end request_key_auth_revoke() */
-
-/*****************************************************************************/
/*
- * destroy an instantiation authorisation token key
+ * Destroy an instantiation authorisation token key.
*/
static void request_key_auth_destroy(struct key *key)
{
@@ -131,13 +123,11 @@ static void request_key_auth_destroy(struct key *key)
key_put(rka->dest_keyring);
kfree(rka->callout_info);
kfree(rka);
+}
-} /* end request_key_auth_destroy() */
-
-/*****************************************************************************/
/*
- * create an authorisation token for /sbin/request-key or whoever to gain
- * access to the caller's security data
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
*/
struct key *request_key_auth_new(struct key *target, const void *callout_info,
size_t callout_len, struct key *dest_keyring)
@@ -228,12 +218,10 @@ error_alloc:
kfree(rka);
kleave("= %d", ret);
return ERR_PTR(ret);
+}
-} /* end request_key_auth_new() */
-
-/*****************************************************************************/
/*
- * see if an authorisation key is associated with a particular key
+ * See if an authorisation key is associated with a particular key.
*/
static int key_get_instantiation_authkey_match(const struct key *key,
const void *_id)
@@ -242,16 +230,11 @@ static int key_get_instantiation_authkey_match(const struct key *key,
key_serial_t id = (key_serial_t)(unsigned long) _id;
return rka->target_key->serial == id;
+}
-} /* end key_get_instantiation_authkey_match() */
-
-/*****************************************************************************/
/*
- * get the authorisation key for instantiation of a specific key if attached to
- * the current process's keyrings
- * - this key is inserted into a keyring and that is set as /sbin/request-key's
- * session keyring
- * - a target_id of zero specifies any valid token
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
@@ -278,5 +261,4 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
error:
return authkey;
-
-} /* end key_get_instantiation_authkey() */
+}
diff --git a/security/keys/trusted_defined.c b/security/keys/trusted.c
index 975e9f29a52c..83fc92e297cd 100644
--- a/security/keys/trusted_defined.c
+++ b/security/keys/trusted.c
@@ -29,7 +29,7 @@
#include <linux/tpm.h>
#include <linux/tpm_command.h>
-#include "trusted_defined.h"
+#include "trusted.h"
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
@@ -101,11 +101,13 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
if (dlen == 0)
break;
data = va_arg(argp, unsigned char *);
- if (data == NULL)
- return -EINVAL;
+ if (data == NULL) {
+ ret = -EINVAL;
+ break;
+ }
ret = crypto_shash_update(&sdesc->shash, data, dlen);
if (ret < 0)
- goto out;
+ break;
}
va_end(argp);
if (!ret)
@@ -146,14 +148,17 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
if (dlen == 0)
break;
data = va_arg(argp, unsigned char *);
- ret = crypto_shash_update(&sdesc->shash, data, dlen);
- if (ret < 0) {
- va_end(argp);
- goto out;
+ if (!data) {
+ ret = -EINVAL;
+ break;
}
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
}
va_end(argp);
- ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (!ret)
ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
paramdigest, TPM_NONCE_SIZE, h1,
@@ -222,13 +227,12 @@ static int TSS_checkhmac1(unsigned char *buffer,
break;
dpos = va_arg(argp, unsigned int);
ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
- if (ret < 0) {
- va_end(argp);
- goto out;
- }
+ if (ret < 0)
+ break;
}
va_end(argp);
- ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (ret < 0)
goto out;
@@ -316,13 +320,12 @@ static int TSS_checkhmac2(unsigned char *buffer,
break;
dpos = va_arg(argp, unsigned int);
ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
- if (ret < 0) {
- va_end(argp);
- goto out;
- }
+ if (ret < 0)
+ break;
}
va_end(argp);
- ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
if (ret < 0)
goto out;
@@ -511,7 +514,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
/* get session for sealing key */
ret = osap(tb, &sess, keyauth, keytype, keyhandle);
if (ret < 0)
- return ret;
+ goto out;
dump_sess(&sess);
/* calculate encrypted authorization value */
@@ -519,11 +522,11 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
if (ret < 0)
- return ret;
+ goto out;
ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
if (ret < 0)
- return ret;
+ goto out;
ordinal = htonl(TPM_ORD_SEAL);
datsize = htonl(datalen);
pcrsize = htonl(pcrinfosize);
@@ -552,7 +555,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
&datsize, datalen, data, 0, 0);
}
if (ret < 0)
- return ret;
+ goto out;
/* build and send the TPM request packet */
INIT_BUF(tb);
@@ -572,7 +575,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
if (ret < 0)
- return ret;
+ goto out;
/* calculate the size of the returned Blob */
sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
@@ -591,6 +594,8 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
*bloblen = storedsize;
}
+out:
+ kfree(td);
return ret;
}
@@ -1027,6 +1032,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
+ kfree(new_p);
goto out;
}
/* copy old key values, and reseal with new pcrs */
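The trusted.c hunks above all serve one cleanup theme: a failure inside the va_arg loops must not skip va_end(), and every exit from tpm_seal() must free its scratch buffer exactly once, so early returns and gotos become break-on-error plus a single out: label (and trusted_update() gains the missing kfree(new_p) on the bad-option path). A minimal standalone C sketch of that shape, with a hypothetical hash_update() standing in for crypto_shash_update():

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-in for crypto_shash_update(): fails on NULL data */
    static int hash_update(char *digest, const unsigned char *data, size_t len)
    {
        if (data == NULL)
            return -1;
        for (size_t i = 0; i < len; i++)
            digest[i % 20] ^= data[i];
        return 0;
    }

    /* variadic (length, pointer) pairs, terminated by length == 0 */
    static int hash_varargs(char *digest, ...)
    {
        va_list argp;
        int ret = 0;

        va_start(argp, digest);
        for (;;) {
            size_t dlen = va_arg(argp, size_t);
            const unsigned char *data;

            if (dlen == 0)
                break;
            data = va_arg(argp, const unsigned char *);
            if (data == NULL) {
                ret = -1;      /* was "return -EINVAL", which skipped va_end() */
                break;
            }
            ret = hash_update(digest, data, dlen);
            if (ret < 0)
                break;         /* was "goto out"; break keeps va_end() on the path */
        }
        va_end(argp);          /* now reached on every path */
        return ret;
    }

    int main(void)
    {
        char digest[20] = { 0 };
        unsigned char buf[4] = "abc";
        void *scratch = malloc(32);    /* plays the role of tpm_seal()'s td */
        int ret;

        if (!scratch)
            return 1;
        ret = hash_varargs(digest, sizeof(buf), buf, (size_t)0);
        if (ret < 0)
            goto out;          /* same single-exit shape as the tpm_seal() fix */
        printf("ok\n");
    out:
        free(scratch);         /* freed exactly once, success or failure */
        return ret < 0;
    }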
diff --git a/security/keys/trusted_defined.h b/security/keys/trusted.h
index 3249fbd2b653..3249fbd2b653 100644
--- a/security/keys/trusted_defined.h
+++ b/security/keys/trusted.h
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index e9aa07929656..02807fb16340 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -35,7 +35,6 @@ struct key_type key_type_user = {
EXPORT_SYMBOL_GPL(key_type_user);
-/*****************************************************************************/
/*
* instantiate a user defined key
*/
@@ -65,12 +64,10 @@ int user_instantiate(struct key *key, const void *data, size_t datalen)
error:
return ret;
-
-} /* end user_instantiate() */
+}
EXPORT_SYMBOL_GPL(user_instantiate);
-/*****************************************************************************/
/*
* dispose of the old data from an updated user defined key
*/
@@ -81,10 +78,8 @@ static void user_update_rcu_disposal(struct rcu_head *rcu)
upayload = container_of(rcu, struct user_key_payload, rcu);
kfree(upayload);
+}
-} /* end user_update_rcu_disposal() */
-
-/*****************************************************************************/
/*
* update a user defined key
* - the key's semaphore is write-locked
@@ -123,24 +118,20 @@ int user_update(struct key *key, const void *data, size_t datalen)
error:
return ret;
-
-} /* end user_update() */
+}
EXPORT_SYMBOL_GPL(user_update);
-/*****************************************************************************/
/*
* match users on their name
*/
int user_match(const struct key *key, const void *description)
{
return strcmp(key->description, description) == 0;
-
-} /* end user_match() */
+}
EXPORT_SYMBOL_GPL(user_match);
-/*****************************************************************************/
/*
* dispose of the links from a revoked keyring
* - called with the key sem write-locked
@@ -156,12 +147,10 @@ void user_revoke(struct key *key)
rcu_assign_pointer(key->payload.data, NULL);
call_rcu(&upayload->rcu, user_update_rcu_disposal);
}
-
-} /* end user_revoke() */
+}
EXPORT_SYMBOL(user_revoke);
-/*****************************************************************************/
/*
* dispose of the data dangling from the corpse of a user key
*/
@@ -170,12 +159,10 @@ void user_destroy(struct key *key)
struct user_key_payload *upayload = key->payload.data;
kfree(upayload);
-
-} /* end user_destroy() */
+}
EXPORT_SYMBOL_GPL(user_destroy);
-/*****************************************************************************/
/*
* describe the user key
*/
@@ -184,12 +171,10 @@ void user_describe(const struct key *key, struct seq_file *m)
seq_puts(m, key->description);
seq_printf(m, ": %u", key->datalen);
-
-} /* end user_describe() */
+}
EXPORT_SYMBOL_GPL(user_describe);
-/*****************************************************************************/
/*
* read the key data
* - the key's semaphore is read-locked
@@ -213,7 +198,6 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
}
return ret;
-
-} /* end user_read() */
+}
EXPORT_SYMBOL_GPL(user_read);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c3f845cbcd48..a53373207fb4 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -178,7 +178,7 @@ int cond_init_bool_indexes(struct policydb *p)
p->bool_val_to_struct = (struct cond_bool_datum **)
kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
if (!p->bool_val_to_struct)
- return -1;
+ return -ENOMEM;
return 0;
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index be9de3872837..57363562f0f8 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -501,8 +501,8 @@ static int policydb_index(struct policydb *p)
if (rc)
goto out;
- rc = -ENOMEM;
- if (cond_init_bool_indexes(p))
+ rc = cond_init_bool_indexes(p);
+ if (rc)
goto out;
for (i = 0; i < SYM_NUM; i++) {
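The SELinux change is a small errno-propagation cleanup: cond_init_bool_indexes() now reports -ENOMEM itself, and policydb_index() forwards whatever rc it returns instead of mapping a bare -1. A standalone sketch of that before/after shape (the function and names here are illustrative, not the SELinux API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* was "return -1" on allocation failure; now returns a real errno */
    static int init_bool_indexes(void **table, size_t nprim)
    {
        *table = calloc(nprim, sizeof(void *));
        if (!*table)
            return -ENOMEM;
        return 0;
    }

    int main(void)
    {
        void *table = NULL;
        int rc;

        /* the caller no longer hardcodes rc = -ENOMEM; it propagates rc as-is */
        rc = init_bool_indexes(&table, 16);
        if (rc)
            goto out;
        printf("indexed\n");
    out:
        free(table);
        return rc ? 1 : 0;
    }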
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 269dbff70b92..be4df4c6fd56 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1721,7 +1721,9 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
{
struct alc_spec *spec = codec->spec;
int id = spec->fixup_id;
+#ifdef CONFIG_SND_DEBUG_VERBOSE
const char *modelname = spec->fixup_name;
+#endif
int depth = 0;
if (!spec->fixup_list)
@@ -10930,9 +10932,6 @@ static int alc_auto_add_mic_boost(struct hda_codec *codec)
return 0;
}
-static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
- const struct auto_pin_cfg *cfg);
-
/* almost identical with ALC880 parser... */
static int alc882_parse_auto_config(struct hda_codec *codec)
{
@@ -10950,10 +10949,7 @@ static int alc882_parse_auto_config(struct hda_codec *codec)
err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
if (err < 0)
return err;
- if (codec->vendor_id == 0x10ec0887)
- err = alc861vd_auto_create_multi_out_ctls(spec, &spec->autocfg);
- else
- err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
+ err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
if (err < 0)
return err;
err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
@@ -12635,6 +12631,8 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
ALC262_HP_BPC),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1300, "HP xw series",
ALC262_HP_BPC),
+ SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
+ ALC262_HP_BPC),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
ALC262_HP_BPC),
SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -14957,6 +14955,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -17134,7 +17133,7 @@ static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
#define alc861vd_idx_to_mixer_switch(nid) ((nid) + 0x0c)
/* add playback controls from the parsed DAC table */
-/* Based on ALC880 version. But ALC861VD and ALC887 have separate,
+/* Based on ALC880 version. But ALC861VD has separate,
* different NIDs for mute/unmute switch and volume control */
static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
const struct auto_pin_cfg *cfg)
@@ -19461,6 +19460,7 @@ enum {
ALC662_FIXUP_ASPIRE,
ALC662_FIXUP_IDEAPAD,
ALC272_FIXUP_MARIO,
+ ALC662_FIXUP_CZC_P10T,
};
static const struct alc_fixup alc662_fixups[] = {
@@ -19481,7 +19481,14 @@ static const struct alc_fixup alc662_fixups[] = {
[ALC272_FIXUP_MARIO] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc272_fixup_mario,
- }
+ },
+ [ALC662_FIXUP_CZC_P10T] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ {0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+ {}
+ }
+ },
};
static struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -19489,6 +19496,7 @@ static struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
{}
};
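For context on the new SND_PCI_QUIRK()/SND_PCI_QUIRK_MASK() entries above (HP z series, the Thinkpad Edge 14 variant, CZC P10T): the driver walks a table keyed on the PCI subsystem vendor and device IDs, optionally masking the device ID, and the first match selects the board config or fixup. A simplified standalone sketch of that lookup, using a hypothetical struct and made-up values rather than the real ALSA macros:

    #include <stdint.h>
    #include <stdio.h>

    struct quirk {
        uint16_t subvendor;
        uint16_t subdevice;
        uint16_t mask;       /* bits of subdevice that must match */
        const char *name;
        int config;          /* e.g. ALC262_HP_BPC in the real table */
    };

    static const struct quirk quirks[] = {
        { 0x103c, 0x1300, 0xff00, "HP xw series", 1 },
        { 0x103c, 0x1500, 0xff00, "HP z series",  1 },   /* the added entry */
        { 0 }                                            /* terminator */
    };

    static const struct quirk *quirk_lookup(uint16_t sv, uint16_t sd)
    {
        for (const struct quirk *q = quirks; q->subvendor; q++)
            if (q->subvendor == sv &&
                (sd & q->mask) == (q->subdevice & q->mask))
                return q;
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = quirk_lookup(0x103c, 0x1522);

        printf("%s\n", q ? q->name : "no quirk");   /* prints "HP z series" */
        return 0;
    }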
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 7b62de089fee..20c6b079d0df 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -580,6 +580,7 @@ static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice)
{
int err;
struct snd_akm4xxx *ak;
+ unsigned char tmp;
if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DELTA1010 &&
ice->eeprom.gpiodir == 0x7b)
@@ -622,6 +623,12 @@ static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice)
break;
}
+ /* initialize the SPI clock to high */
+ tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA);
+ tmp |= ICE1712_DELTA_AP_CCLK;
+ snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp);
+ udelay(5);
+
/* initialize spdif */
switch (ice->eeprom.subvendor) {
case ICE1712_SUBDEVICE_AUDIOPHILE:
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 3abeeddc67d3..ae403597fd31 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -1,6 +1,7 @@
config SND_BF5XX_I2S
tristate "SoC I2S Audio for the ADI BF5xx chip"
depends on BLACKFIN
+ select SND_BF5XX_SOC_SPORT
help
Say Y or M if you want to add support for codecs attached to
the Blackfin SPORT (synchronous serial ports) interface in I2S
@@ -35,6 +36,7 @@ config SND_BFIN_AD73311_SE
config SND_BF5XX_TDM
tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
depends on (BLACKFIN && SND_SOC)
+ select SND_BF5XX_SOC_SPORT
help
Say Y or M if you want to add support for codecs attached to
the Blackfin SPORT (synchronous serial ports) interface in TDM
@@ -61,6 +63,10 @@ config SND_BF5XX_SOC_AD193X
config SND_BF5XX_AC97
tristate "SoC AC97 Audio for the ADI BF5xx chip"
depends on BLACKFIN
+ select AC97_BUS
+ select SND_SOC_AC97_BUS
+ select SND_BF5XX_SOC_SPORT
+ select SND_BF5XX_SOC_AC97
help
Say Y or M if you want to add support for codecs attached to
the Blackfin SPORT (synchronous serial ports) interface in slot 16
@@ -122,17 +128,12 @@ config SND_BF5XX_SOC_SPORT
config SND_BF5XX_SOC_I2S
tristate
- select SND_BF5XX_SOC_SPORT
config SND_BF5XX_SOC_TDM
tristate
- select SND_BF5XX_SOC_SPORT
config SND_BF5XX_SOC_AC97
tristate
- select AC97_BUS
- select SND_SOC_AC97_BUS
- select SND_BF5XX_SOC_SPORT
config SND_BF5XX_SPORT_NUM
int "Set a SPORT for Sound chip"
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index c5f856ec27ca..ffbac26b9bce 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,9 +260,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
pr_debug("%s : sport %d\n", __func__, dai->id);
if (!dai->active)
return 0;
- if (dai->capture.active)
+ if (dai->capture_active)
sport_rx_stop(sport);
- if (dai->playback.active)
+ if (dai->playback_active)
sport_tx_stop(sport);
return 0;
}
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
index 125123929f16..5515ac9e05c7 100644
--- a/sound/soc/blackfin/bf5xx-tdm.c
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -210,7 +210,7 @@ static int bf5xx_tdm_set_channel_map(struct snd_soc_dai *dai,
#ifdef CONFIG_PM
static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
{
- struct sport_device *sport = dai->private_data;
+ struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
if (!dai->active)
return 0;
@@ -235,13 +235,13 @@ static int bf5xx_tdm_resume(struct snd_soc_dai *dai)
ret = -EBUSY;
}
- ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
+ ret = sport_config_rx(sport, 0, 0x1F, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
ret = -EBUSY;
}
- ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
+ ret = sport_config_tx(sport, 0, 0x1F, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
ret = -EBUSY;
@@ -303,14 +303,14 @@ static int __devinit bfin_tdm_probe(struct platform_device *pdev)
goto sport_config_err;
}
- ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
+ ret = sport_config_rx(sport_handle, 0, 0x1F, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
ret = -EBUSY;
goto sport_config_err;
}
- ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
+ ret = sport_config_tx(sport_handle, 0, 0x1F, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
ret = -EBUSY;
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 2d4f896d7fec..3ceaef68e01d 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -104,6 +104,7 @@ static struct snd_soc_jack_gpio hs_jack_gpios[] = {
.name = "hsdet-gpio",
.report = SND_JACK_HEADSET,
.debounce_time = 200,
+ .invert = 1,
},
};
@@ -192,7 +193,7 @@ static struct snd_soc_dai_link z2_dai = {
.cpu_dai_name = "pxa2xx-i2s",
.codec_dai_name = "wm8750-hifi",
.platform_name = "pxa-pcm-audio",
- .codec_name = "wm8750-codec.0-001a",
+ .codec_name = "wm8750-codec.0-001b",
.init = z2_wm8750_init,
.ops = &z2_ops,
};
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 2b5387d53ba5..7141c42e1469 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -204,13 +204,11 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
@@ -294,6 +292,13 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
CFLAGS := $(CFLAGS) -fstack-protector-all
endif
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y)
+ CFLAGS := $(CFLAGS) -Wstack-protector
+endif
+
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y)
+ CFLAGS := $(CFLAGS) -Wvolatile-register-var
+endif
### --- END CONFIGURATION SECTION ---
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index c056cdc06912..8879463807e4 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -212,7 +212,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename)
continue;
offset = start + i;
- sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+ sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
fp = popen(cmd, "r");
if (!fp)
continue;
@@ -270,9 +270,9 @@ static void hist_entry__print_hits(struct hist_entry *self)
for (offset = 0; offset < len; ++offset)
if (h->ip[offset] != 0)
- printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+ printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
sym->start + offset, h->ip[offset]);
- printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+ printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}
static int hist_entry__tty_annotate(struct hist_entry *he)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index def7ddc2fd4f..d97256d65980 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -371,10 +371,10 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
addr = data->ptr;
if (sym != NULL)
- snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
+ snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
addr - map->unmap_ip(map, sym->start));
else
- snprintf(buf, sizeof(buf), "%#Lx", addr);
+ snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
printf(" %-34s |", buf);
printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b9c6e5432971..2b36defc5d73 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -782,9 +782,9 @@ static void print_result(void)
pr_info("%10u ", st->nr_acquired);
pr_info("%10u ", st->nr_contended);
- pr_info("%15llu ", st->wait_time_total);
- pr_info("%15llu ", st->wait_time_max);
- pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ pr_info("%15" PRIu64 " ", st->wait_time_total);
+ pr_info("%15" PRIu64 " ", st->wait_time_max);
+ pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
0 : st->wait_time_min);
pr_info("\n");
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index fcd29e8af29f..b2f729fdb317 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -817,7 +817,7 @@ static int __cmd_record(int argc, const char **argv)
* Approximate RIP event size: 24 bytes.
*/
fprintf(stderr,
- "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
+ "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
(double)bytes_written / 1024.0 / 1024.0,
output_name,
bytes_written / 24);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 75183a4518e6..c27e31f289e6 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -197,7 +197,7 @@ static int process_read_event(event_t *event, struct sample_data *sample __used,
event->read.value);
}
- dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
+ dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
attr ? __event_name(attr->type, attr->config) : "FAIL",
event->read.value);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 29e7ffd85690..29acb894e035 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -193,7 +193,7 @@ static void calibrate_run_measurement_overhead(void)
}
run_measurement_overhead = min_delta;
- printf("run measurement overhead: %Ld nsecs\n", min_delta);
+ printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static void calibrate_sleep_measurement_overhead(void)
@@ -211,7 +211,7 @@ static void calibrate_sleep_measurement_overhead(void)
min_delta -= 10000;
sleep_measurement_overhead = min_delta;
- printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
+ printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
@@ -617,13 +617,13 @@ static void test_calibrations(void)
burn_nsecs(1e6);
T1 = get_nsecs();
- printf("the run test took %Ld nsecs\n", T1-T0);
+ printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
T0 = get_nsecs();
sleep_nsecs(1e6);
T1 = get_nsecs();
- printf("the sleep test took %Ld nsecs\n", T1-T0);
+ printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
#define FILL_FIELD(ptr, field, event, data) \
@@ -816,10 +816,10 @@ replay_switch_event(struct trace_switch_event *switch_event,
delta = 0;
if (delta < 0)
- die("hm, delta: %Ld < 0 ?\n", delta);
+ die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
if (verbose) {
- printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
+ printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
switch_event->prev_comm, switch_event->prev_pid,
switch_event->next_comm, switch_event->next_pid,
delta);
@@ -1048,7 +1048,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
delta = 0;
if (delta < 0)
- die("hm, delta: %Ld < 0 ?\n", delta);
+ die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1221,7 +1221,7 @@ static void output_lat_thread(struct work_atoms *work_list)
avg = work_list->total_lat / work_list->nb_atoms;
- printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+ printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
(double)work_list->total_runtime / 1e6,
work_list->nb_atoms, (double)avg / 1e6,
(double)work_list->max_lat / 1e6,
@@ -1423,7 +1423,7 @@ map_switch_event(struct trace_switch_event *switch_event,
delta = 0;
if (delta < 0)
- die("hm, delta: %Ld < 0 ?\n", delta);
+ die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1713,7 +1713,7 @@ static void __cmd_lat(void)
}
printf(" -----------------------------------------------------------------------------------------\n");
- printf(" TOTAL: |%11.3f ms |%9Ld |\n",
+ printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
(double)all_runtime/1e6, all_count);
printf(" ---------------------------------------------------\n");
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 150a606002eb..b766c2a9ac97 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -77,8 +77,8 @@ static int process_sample_event(event_t *event, struct sample_data *sample,
if (session->sample_type & PERF_SAMPLE_RAW) {
if (debug_mode) {
if (sample->time < last_timestamp) {
- pr_err("Samples misordered, previous: %llu "
- "this: %llu\n", last_timestamp,
+ pr_err("Samples misordered, previous: %" PRIu64
+ " this: %" PRIu64 "\n", last_timestamp,
sample->time);
nr_unordered++;
}
@@ -126,7 +126,7 @@ static int __cmd_script(struct perf_session *session)
ret = perf_session__process_events(session, &event_ops);
if (debug_mode)
- pr_err("Misordered timestamps: %llu\n", nr_unordered);
+ pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
return ret;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0ff11d9b13be..a482a191a0ca 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -206,8 +206,8 @@ static int read_counter_aggr(struct perf_evsel *counter)
update_stats(&ps->res_stats[i], count[i]);
if (verbose) {
- fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
- count[0], count[1], count[2]);
+ fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+ event_name(counter), count[0], count[1], count[2]);
}
/*
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index ed5696198d3d..5dcdba653d70 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -146,7 +146,7 @@ next_pair:
if (llabs(skew) < page_size)
continue;
- pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+ pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
sym->start, sym->name, sym->end, pair->end);
} else {
struct rb_node *nnd;
@@ -168,11 +168,11 @@ detour:
goto detour;
}
- pr_debug("%#Lx: diff name v: %s k: %s\n",
+ pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
sym->start, sym->name, pair->name);
}
} else
- pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+ pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
err = -1;
}
@@ -211,10 +211,10 @@ detour:
if (pair->start == pos->start) {
pair->priv = 1;
- pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+ pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
pos->start, pos->end, pos->pgoff, pos->dso->name);
if (pos->pgoff != pair->pgoff || pos->end != pair->end)
- pr_info(": \n*%Lx-%Lx %Lx",
+ pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
pair->start, pair->end, pair->pgoff);
pr_info(" %s\n", pair->dso->name);
pair->priv = 1;
@@ -307,7 +307,7 @@ static int test__open_syscall_event(void)
}
if (evsel->counts->cpu[0].val != nr_open_calls) {
- pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+ pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
nr_open_calls, evsel->counts->cpu[0].val);
goto out_close_fd;
}
@@ -332,8 +332,7 @@ static int test__open_syscall_event_on_all_cpus(void)
struct perf_evsel *evsel;
struct perf_event_attr attr;
unsigned int nr_open_calls = 111, i;
- cpu_set_t *cpu_set;
- size_t cpu_set_size;
+ cpu_set_t cpu_set;
int id = trace_event__id("sys_enter_open");
if (id < 0) {
@@ -353,13 +352,8 @@ static int test__open_syscall_event_on_all_cpus(void)
return -1;
}
- cpu_set = CPU_ALLOC(cpus->nr);
- if (cpu_set == NULL)
- goto out_thread_map_delete;
-
- cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
- CPU_ZERO_S(cpu_set_size, cpu_set);
+ CPU_ZERO(&cpu_set);
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_TRACEPOINT;
@@ -367,7 +361,7 @@ static int test__open_syscall_event_on_all_cpus(void)
evsel = perf_evsel__new(&attr, 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
- goto out_cpu_free;
+ goto out_thread_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -379,14 +373,29 @@ static int test__open_syscall_event_on_all_cpus(void)
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int ncalls = nr_open_calls + cpu;
+ /*
+ * XXX eventually lift this restriction in a way that
+ * keeps perf building on older glibc installations
+ * without CPU_ALLOC. 1024 cpus in 2010 still seems
+ * a reasonable upper limit tho :-)
+ */
+ if (cpus->map[cpu] >= CPU_SETSIZE) {
+ pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+ continue;
+ }
- CPU_SET(cpu, cpu_set);
- sched_setaffinity(0, cpu_set_size, cpu_set);
+ CPU_SET(cpus->map[cpu], &cpu_set);
+ if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+ pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+ cpus->map[cpu],
+ strerror(errno));
+ goto out_close_fd;
+ }
for (i = 0; i < ncalls; ++i) {
fd = open("/etc/passwd", O_RDONLY);
close(fd);
}
- CPU_CLR(cpu, cpu_set);
+ CPU_CLR(cpus->map[cpu], &cpu_set);
}
/*
@@ -402,6 +411,9 @@ static int test__open_syscall_event_on_all_cpus(void)
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int expected;
+ if (cpus->map[cpu] >= CPU_SETSIZE)
+ continue;
+
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
pr_debug("perf_evsel__open_read_on_cpu\n");
goto out_close_fd;
@@ -409,8 +421,8 @@ static int test__open_syscall_event_on_all_cpus(void)
expected = nr_open_calls + cpu;
if (evsel->counts->cpu[cpu].val != expected) {
- pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
- expected, cpu, evsel->counts->cpu[cpu].val);
+ pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+ expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
goto out_close_fd;
}
}
@@ -420,8 +432,6 @@ out_close_fd:
perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
perf_evsel__delete(evsel);
-out_cpu_free:
- CPU_FREE(cpu_set);
out_thread_map_delete:
thread_map__delete(threads);
return err;
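The builtin-test.c change above drops the CPU_ALLOC()/CPU_FREE() API in favour of a plain cpu_set_t, which keeps perf building on older glibc at the cost of ignoring CPU ids at or above CPU_SETSIZE. The resulting affinity pattern looks roughly like this standalone sketch (pinning to CPU 0 purely for illustration):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        cpu_set_t cpu_set;
        int cpu = 0;                     /* one CPU id, as in the per-CPU loop */

        if (cpu >= CPU_SETSIZE) {        /* mirror the new guard in the test */
            fprintf(stderr, "Ignoring CPU %d\n", cpu);
            return 0;
        }

        CPU_ZERO(&cpu_set);
        CPU_SET(cpu, &cpu_set);
        if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
            fprintf(stderr, "sched_setaffinity() failed on CPU %d: %s\n",
                    cpu, strerror(errno));
            return 1;
        }

        /* ... run the pinned workload here (the test open()s /etc/passwd) ... */

        CPU_CLR(cpu, &cpu_set);          /* clear before moving to the next CPU */
        return 0;
    }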
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 05344c6210ac..b6998e055767 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
+#include <inttypes.h>
#include <errno.h>
#include <time.h>
@@ -214,7 +215,7 @@ static int parse_source(struct sym_entry *syme)
len = sym->end - sym->start;
sprintf(command,
- "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+ "objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
@@ -308,7 +309,7 @@ static void lookup_sym_source(struct sym_entry *syme)
struct source_line *line;
char pattern[PATTERN_LEN + 1];
- sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+ sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
map__rip_2objdump(syme->map, symbol->start));
pthread_mutex_lock(&syme->src->lock);
@@ -537,7 +538,7 @@ static void print_sym_table(void)
if (nr_counters == 1 || !display_weighted) {
struct perf_evsel *first;
first = list_entry(evsel_list.next, struct perf_evsel, node);
- printf("%Ld", first->attr.sample_period);
+ printf("%" PRIu64, (uint64_t)first->attr.sample_period);
if (freq)
printf("Hz ");
else
@@ -640,7 +641,7 @@ static void print_sym_table(void)
percent_color_fprintf(stdout, "%4.1f%%", pcnt);
if (verbose)
- printf(" %016llx", sym->start);
+ printf(" %016" PRIx64, sym->start);
printf(" %-*.*s", sym_width, sym_width, sym->name);
printf(" %-*.*s\n", dso_width, dso_width,
dso_width >= syme->map->dso->long_name_len ?
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 2302ec051bb4..1478ab4ee222 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -459,7 +459,8 @@ int event__process_comm(event_t *self, struct sample_data *sample __used,
int event__process_lost(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
- dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
+ dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+ self->lost.id, self->lost.lost);
session->hists.stats.total_lost += self->lost.lost;
return 0;
}
@@ -575,7 +576,7 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used,
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
int ret = 0;
- dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+ dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
self->mmap.pid, self->mmap.tid, self->mmap.start,
self->mmap.len, self->mmap.pgoff, self->mmap.filename);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 989fa2dee2fd..f6a929e74981 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -798,8 +798,8 @@ static int perf_file_section__process(struct perf_file_section *self,
int feat, int fd)
{
if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
- pr_debug("Failed to lseek to %Ld offset for feature %d, "
- "continuing...\n", self->offset, feat);
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", self->offset, feat);
return 0;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index c749ba6136a0..32f4f1f2f6e4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -636,13 +636,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
}
}
} else
- ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
+ ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
if (symbol_conf.show_nr_samples) {
if (sep)
- ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+ ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
else
- ret += snprintf(s + ret, size - ret, "%11lld", period);
+ ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
}
if (pair_hists) {
@@ -971,7 +971,7 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
sym_size = sym->end - sym->start;
offset = ip - sym->start;
- pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+ pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
if (offset >= sym_size)
return 0;
@@ -980,8 +980,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
h->sum++;
h->ip[offset]++;
- pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
- self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
+ pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
+ "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
+ ip, ip - self->ms.sym->start, h->ip[offset]);
return 0;
}
@@ -1132,7 +1133,7 @@ fallback:
goto out_free_filename;
}
- pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -1142,7 +1143,7 @@ fallback:
dso, dso->long_name, sym, sym->name);
snprintf(command, sizeof(command),
- "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
+ "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
symfs_filename, filename);
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 8be0b968ca0b..305c8484f200 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -2,6 +2,7 @@
#define _PERF_LINUX_BITOPS_H_
#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <asm/hweight.h>
#define BITS_PER_LONG __WORDSIZE
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3a7eb6ec0eec..a16ecab5229d 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,5 +1,6 @@
#include "symbol.h"
#include <errno.h>
+#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
@@ -195,7 +196,7 @@ int map__overlap(struct map *l, struct map *r)
size_t map__fprintf(struct map *self, FILE *fp)
{
- return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
self->start, self->end, self->pgoff, self->dso->name);
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index bc2732ee23eb..135f69baf966 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -279,7 +279,7 @@ const char *__event_name(int type, u64 config)
static char buf[32];
if (type == PERF_TYPE_RAW) {
- sprintf(buf, "raw 0x%llx", config);
+ sprintf(buf, "raw 0x%" PRIx64, config);
return buf;
}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b82cafb83772..458e3ecf17af 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@ struct tracepoint_path {
};
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
-extern bool have_tracepoints(struct list_head *evsel_list);
+extern bool have_tracepoints(struct list_head *evlist);
extern int nr_counters;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 128aaab0aeda..6e29d9c9dccc 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -172,7 +172,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
sym = __find_kernel_function_by_name(tp->symbol, &map);
if (sym) {
addr = map->unmap_ip(map, sym->start + tp->offset);
- pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+ pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
tp->offset, addr);
ret = find_perf_probe_point((unsigned long)addr, pp);
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 313dac2d94ce..105f00bfd555 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -652,10 +652,11 @@ static void callchain__printf(struct sample_data *sample)
{
unsigned int i;
- printf("... chain: nr:%Lu\n", sample->callchain->nr);
+ printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
for (i = 0; i < sample->callchain->nr; i++)
- printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
+ printf("..... %2d: %016" PRIx64 "\n",
+ i, sample->callchain->ips[i]);
}
static void perf_session__print_tstamp(struct perf_session *session,
@@ -672,7 +673,7 @@ static void perf_session__print_tstamp(struct perf_session *session,
printf("%u ", sample->cpu);
if (session->sample_type & PERF_SAMPLE_TIME)
- printf("%Lu ", sample->time);
+ printf("%" PRIu64 " ", sample->time);
}
static void dump_event(struct perf_session *session, event_t *event,
@@ -681,16 +682,16 @@ static void dump_event(struct perf_session *session, event_t *event,
if (!dump_trace)
return;
- printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
- event->header.type);
+ printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+ file_offset, event->header.size, event->header.type);
trace_event(event);
if (sample)
perf_session__print_tstamp(session, event, sample);
- printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
- event__get_event_name(event->header.type));
+ printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+ event->header.size, event__get_event_name(event->header.type));
}
static void dump_sample(struct perf_session *session, event_t *event,
@@ -699,8 +700,9 @@ static void dump_sample(struct perf_session *session, event_t *event,
if (!dump_trace)
return;
- printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
- sample->pid, sample->tid, sample->ip, sample->period);
+ printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
+ event->header.misc, sample->pid, sample->tid, sample->ip,
+ sample->period);
if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
@@ -843,8 +845,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
{
if (ops->lost == event__process_lost &&
session->hists.stats.total_lost != 0) {
- ui__warning("Processed %Lu events and LOST %Lu!\n\n"
- "Check IO/CPU overload!\n\n",
+ ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
+ "!\n\nCheck IO/CPU overload!\n\n",
session->hists.stats.total_period,
session->hists.stats.total_lost);
}
@@ -918,7 +920,7 @@ more:
if (size == 0 ||
(skip = perf_session__process_event(self, &event, ops, head)) < 0) {
- dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
head, event.header.size, event.header.type);
/*
* assume we lost track of the stream, check alignment, and
@@ -1023,7 +1025,7 @@ more:
if (size == 0 ||
perf_session__process_event(session, event, ops, file_pos) < 0) {
- dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
file_offset + head, event->header.size,
event->header.type);
/*
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b3637db025a2..fb737fe9be91 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -12,6 +12,7 @@
* of the License.
*/
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -43,11 +44,11 @@ static double cpu2y(int cpu)
return cpu2slot(cpu) * SLOT_MULT;
}
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
{
double X;
- X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+ X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
return X;
}
@@ -94,7 +95,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
- fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+ fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
@@ -483,7 +484,7 @@ void svg_time_grid(void)
color = 128;
}
- fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
i += 10000000;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 15ccfba8cdf8..7821d0e6866f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -11,6 +11,7 @@
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
+#include <inttypes.h>
#include "build-id.h"
#include "debug.h"
#include "symbol.h"
@@ -153,7 +154,7 @@ static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
self->binding = binding;
self->namelen = namelen - 1;
- pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+ pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end);
memcpy(self->name, name, namelen);
@@ -167,7 +168,7 @@ void symbol__delete(struct symbol *self)
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
{
- return fprintf(fp, " %llx-%llx %c %s\n",
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
self->start, self->end,
self->binding == STB_GLOBAL ? 'g' :
self->binding == STB_LOCAL ? 'l' : 'w',
@@ -1161,6 +1162,13 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
section_name = elf_sec__name(&shdr, secstrs);
+ /* On ARM, symbols for thumb functions have 1 added to
+ * the symbol address as a flag - remove it */
+ if ((ehdr.e_machine == EM_ARM) &&
+ (map->type == MAP__FUNCTION) &&
+ (sym.st_value & 1))
+ --sym.st_value;
+
if (self->kernel != DSO_TYPE_USER || kmodule) {
char dso_name[PATH_MAX];
@@ -1208,8 +1216,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
}
if (curr_dso->adjust_symbols) {
- pr_debug4("%s: adjusting symbol: st_value: %#Lx "
- "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
(u64)sym.st_value, (u64)shdr.sh_addr,
(u64)shdr.sh_offset);
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
index 7d6b8331f898..5f3689a3d085 100644
--- a/tools/perf/util/types.h
+++ b/tools/perf/util/types.h
@@ -1,12 +1,14 @@
#ifndef __PERF_TYPES_H
#define __PERF_TYPES_H
+#include <stdint.h>
+
/*
- * We define u64 as unsigned long long for every architecture
- * so that we can print it with %Lx without getting warnings.
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
*/
-typedef unsigned long long u64;
-typedef signed long long s64;
+typedef uint64_t u64;
+typedef int64_t s64;
typedef unsigned int u32;
typedef signed int s32;
typedef unsigned short u16;
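This typedef switch is what drives the long run of format-string conversions above: once u64 is uint64_t (unsigned long on 64-bit glibc, unsigned long long on 32-bit), the old %Lu/%Lx formats no longer match it cleanly and can trigger -Wformat warnings, while the <inttypes.h> PRI macros always expand to the matching length modifier. A minimal sketch of the new printing convention:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t u64;    /* as in the updated tools/perf/util/types.h */

    int main(void)
    {
        u64 ip = 0xffffffff81000000ULL;
        u64 period = 4000000;

        /* string-pastes to e.g. "ip: %#lx period: %lu" on x86_64 */
        printf("ip: %#" PRIx64 " period: %" PRIu64 "\n", ip, period);
        return 0;
    }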
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index ebda8c3fde9e..60c463c16028 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -350,7 +350,7 @@ static char *callchain_list__sym_name(struct callchain_list *self,
if (self->ms.sym)
return self->ms.sym->name;
- snprintf(bf, bfsize, "%#Lx", self->ip);
+ snprintf(bf, bfsize, "%#" PRIx64, self->ip);
return bf;
}
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index e35437dfa5b4..e5158369106e 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -1,5 +1,6 @@
#include "../libslang.h"
#include <elf.h>
+#include <inttypes.h>
#include <sys/ttydefaults.h>
#include <ctype.h>
#include <string.h>
@@ -57,7 +58,7 @@ static void map_browser__write(struct ui_browser *self, void *nd, int row)
int width;
ui_browser__set_percent_color(self, 0, current_entry);
- slsmg_printf("%*llx %*llx %c ",
+ slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
mb->addrlen, sym->start, mb->addrlen, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w');
@@ -150,6 +151,6 @@ int map__browse(struct map *self)
++mb.b.nr_entries;
}
- mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
+ mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
return map_browser__run(&mb);
}
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index cfa55d686e3b..bdd33470b235 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -150,7 +150,7 @@ static void perf_read_values__display_pretty(FILE *fp,
if (width > tidwidth)
tidwidth = width;
for (j = 0; j < values->counters; j++) {
- width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > counterwidth[j])
counterwidth[j] = width;
}
@@ -165,7 +165,7 @@ static void perf_read_values__display_pretty(FILE *fp,
fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
tidwidth, values->tid[i]);
for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*Lu",
+ fprintf(fp, " %*" PRIu64,
counterwidth[j], values->value[i][j]);
fprintf(fp, "\n");
}
@@ -196,13 +196,13 @@ static void perf_read_values__display_raw(FILE *fp,
width = strlen(values->countername[j]);
if (width > namewidth)
namewidth = width;
- width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+ width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
if (width > rawwidth)
rawwidth = width;
}
for (i = 0; i < values->threads; i++) {
for (j = 0; j < values->counters; j++) {
- width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > countwidth)
countwidth = width;
}
@@ -214,7 +214,7 @@ static void perf_read_values__display_raw(FILE *fp,
countwidth, "Count");
for (i = 0; i < values->threads; i++)
for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*d %*d %*s %*llx %*Lu\n",
+ fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64,
pidwidth, values->pid[i],
tidwidth, values->tid[i],
namewidth, values->countername[j],
diff --git a/usr/Kconfig b/usr/Kconfig
index 4780deac5974..65b845bd4e3e 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -46,7 +46,7 @@ config INITRAMFS_ROOT_GID
If you are not sure, leave it set to "0".
config RD_GZIP
- bool "Support initial ramdisks compressed using gzip" if EMBEDDED
+ bool "Support initial ramdisks compressed using gzip" if EXPERT
default y
depends on BLK_DEV_INITRD
select DECOMPRESS_GZIP
@@ -55,8 +55,8 @@ config RD_GZIP
If unsure, say Y.
config RD_BZIP2
- bool "Support initial ramdisks compressed using bzip2" if EMBEDDED
- default !EMBEDDED
+ bool "Support initial ramdisks compressed using bzip2" if EXPERT
+ default !EXPERT
depends on BLK_DEV_INITRD
select DECOMPRESS_BZIP2
help
@@ -64,8 +64,8 @@ config RD_BZIP2
If unsure, say N.
config RD_LZMA
- bool "Support initial ramdisks compressed using LZMA" if EMBEDDED
- default !EMBEDDED
+ bool "Support initial ramdisks compressed using LZMA" if EXPERT
+ default !EXPERT
depends on BLK_DEV_INITRD
select DECOMPRESS_LZMA
help
@@ -73,8 +73,8 @@ config RD_LZMA
If unsure, say N.
config RD_XZ
- bool "Support initial ramdisks compressed using XZ" if EMBEDDED
- default !EMBEDDED
+ bool "Support initial ramdisks compressed using XZ" if EXPERT
+ default !EXPERT
depends on BLK_DEV_INITRD
select DECOMPRESS_XZ
help
@@ -82,8 +82,8 @@ config RD_XZ
If unsure, say N.
config RD_LZO
- bool "Support initial ramdisks compressed using LZO" if EMBEDDED
- default !EMBEDDED
+ bool "Support initial ramdisks compressed using LZO" if EXPERT
+ default !EXPERT
depends on BLK_DEV_INITRD
select DECOMPRESS_LZO
help